1 diff -urNp linux-5240/fs/afs/cache.c linux-5250/fs/afs/cache.c
2 --- linux-5240/fs/afs/cache.c   1970-01-01 01:00:00.000000000 +0100
3 +++ linux-5250/fs/afs/cache.c   
4 @@ -0,0 +1,664 @@
5 +/* cache.c: AFS local cache management
6 + *
7 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
8 + * Written by David Howells (dhowells@redhat.com)
9 + *
10 + * This program is free software; you can redistribute it and/or
11 + * modify it under the terms of the GNU General Public License
12 + * as published by the Free Software Foundation; either version
13 + * 2 of the License, or (at your option) any later version.
14 + */
15 +
16 +#include <linux/kernel.h>
17 +#include <linux/module.h>
18 +#include <linux/init.h>
19 +#include <linux/slab.h>
20 +#include <linux/fs.h>
21 +#include <linux/namei.h>
22 +#include <linux/pagemap.h>
23 +#include <linux/devfs_fs_kernel.h>
24 +#include <linux/buffer_head.h>
25 +#include "cell.h"
26 +#include "cmservice.h"
27 +#include "fsclient.h"
28 +#include "cache.h"
29 +#include "volume.h"
30 +#include "vnode.h"
31 +#include "internal.h"
32 +
33 +static LIST_HEAD(afs_cache_list);
34 +static DECLARE_MUTEX(afs_cache_list_sem);
35 +
36 +static int afs_cache_read_sig(afs_cache_t *cache);
37 +
38 +/*****************************************************************************/
39 +/*
40 + * stat a cache device to find its device numbers
41 + */
42 +static int afs_cache_get_kdev(const char *cachename, kdev_t *_kdev, struct file **_bdfile)
43 +{
44 +       struct nameidata nd;
45 +       struct inode *inode;
46 +       struct file *bdfile;
47 +       int ret;
48 +
49 +       /* look up the cache device file */
50 +       if (!cachename)
51 +               return -EINVAL;
52 +
53 +       ret = path_lookup(cachename,LOOKUP_FOLLOW,&nd);
54 +       if (ret)
55 +               return ret;
56 +
57 +       /* check it's a block device file */
58 +       inode = nd.dentry->d_inode;
59 +       ret = -ENOTBLK;
60 +       if (!S_ISBLK(inode->i_mode)) {
61 +               path_release(&nd);
62 +               return ret;
63 +       }
64 +
65 +       /* open a file for it */
66 +       bdfile = dentry_open(nd.dentry,nd.mnt,O_RDWR);
67 +       if (IS_ERR(bdfile))
68 +               return ret;
69 +
70 +       *_kdev = inode->i_rdev;
71 +       *_bdfile = bdfile;
72 +       return 0;
73 +} /* end afs_cache_get_kdev() */
74 +
75 +/*****************************************************************************/
76 +/*
77 + * open a cache device
78 + */
79 +int afs_cache_open(const char *cachename, afs_cache_t **_cache)
80 +{
81 +       struct list_head *_p;
82 +       afs_cache_t *cache, *ncache;
83 +       kdev_t dev;
84 +       int ret = 0;
85 +
86 +       _enter("{%s}",cachename);
87 +
88 +       BUG();
89 +
90 +       /* pre-allocate a cache record */
91 +       ret = -ENOMEM;
92 +       ncache = kmalloc(sizeof(*ncache),GFP_KERNEL);
93 +       if (!ncache) {
94 +               _leave(" = %d [lookup failed]",ret);
95 +               return ret;
96 +       }
97 +       memset(ncache,0,sizeof(*ncache));
98 +
99 +       atomic_set(&ncache->usage,1);
100 +       INIT_LIST_HEAD(&ncache->link);
101 +       init_rwsem(&ncache->sem);
102 +
103 +       /* lookup the block device */
104 +       ret = afs_cache_get_kdev(cachename,&dev,&ncache->bdfile);
105 +       if (ret<0) {
106 +               kfree(ncache);
107 +               _leave(" = %d [lookup failed]",ret);
108 +               return ret;
109 +       }
110 +
111 +       ncache->dev = dev;
112 +
113 +       /* see if we've already got the cache open */
114 +       cache = NULL;
115 +       down(&afs_cache_list_sem);
116 +
117 +       list_for_each(_p,&afs_cache_list) {
118 +               cache = list_entry(_p,afs_cache_t,link);
119 +               if (kdev_same(cache->dev,dev))
120 +                       goto found;
121 +       }
122 +       goto not_found;
123 +
124 +       /* we already have the cache open */
125 + found:
126 +       kdebug("kAFS re-using cache block dev %s",kdevname(dev));
127 +       filp_close(cache->bdfile,NULL);
128 +       kfree(ncache);
129 +       ncache = NULL;
130 +       afs_get_cache(cache);
131 +       goto success;
132 +
133 +       /* we don't already have the cache open */
134 + not_found:
135 +       kdebug("kAFS using cache block dev %s",kdevname(dev));
136 +       cache = ncache;
137 +       ncache = NULL;
138 +
139 +       /* grab a handle to the block device */
140 +       ret = -ENOMEM;
141 +       cache->bdev = bdget(kdev_t_to_nr(dev));
142 +       if (!cache->bdev)
143 +               goto out;
144 +
145 +       /* open the block device node */
146 +       ret = blkdev_get(cache->bdev,FMODE_READ|FMODE_WRITE,0,BDEV_RAW);
147 +       if (ret)
148 +               goto out;
149 +
150 +       /* quick insanity check */
151 +       check_disk_change(cache->dev);
152 +       ret = -EACCES;
153 +       if (is_read_only(cache->dev))
154 +               goto out;
155 +
156 +       /* mark it as mine */
157 +       ret = bd_claim(cache->bdev,cache);
158 +       if (ret)
159 +               goto out;
160 +
161 +       /* check it */
162 +       ret = afs_cache_read_sig(cache);
163 +       if (ret<0)
164 +               goto out_unclaim;
165 +
166 +       list_add_tail(&cache->link,&afs_cache_list);
167 +
168 + success:
169 +       *_cache = cache;
170 +       up(&afs_cache_list_sem);
171 +       _leave(" = 0 (%p{%x})",cache->bdev,kdev_t_to_nr(cache->dev));
172 +       return 0;
173 +
174 + out_unclaim:
175 +       bd_release(cache->bdev);
176 + out:
177 +       if (cache->bdfile)
178 +               filp_close(cache->bdfile,NULL);
179 +       if (cache->bdev) {
180 +               blkdev_put(cache->bdev,BDEV_RAW);
181 +               cache->bdev = NULL;
182 +       }
183 +
184 +       kfree(cache);
185 +
186 +       up(&afs_cache_list_sem);
187 +       _leave(" = %d",ret);
188 +       return ret;
189 +
190 +} /* end afs_cache_open() */
191 +
192 +/*****************************************************************************/
193 +/*
194 + * release a cache device
195 + */
196 +void afs_put_cache(afs_cache_t *cache)
197 +{
198 +       _enter("%p{u=%d}",cache,atomic_read(&cache->usage));
199 +
200 +       down(&afs_cache_list_sem);
201 +
202 +       if (!atomic_dec_and_test(&cache->usage))
203 +               cache = NULL;
204 +       else
205 +               list_del(&cache->link);
206 +
207 +       up(&afs_cache_list_sem);
208 +
209 +       /* if that was the last ref, then release the kernel resources */
210 +       if (cache) {
211 +               kdebug("kAFS releasing cache block dev %s",kdevname(cache->dev));
212 +               filp_close(cache->bdfile,NULL);
213 +               bd_release(cache->bdev);
214 +               blkdev_put(cache->bdev,BDEV_RAW);
215 +               kfree(cache);
216 +       }
217 +
218 +       _leave("");
219 +} /* end afs_put_cache() */
220 +
221 +/*****************************************************************************/
222 +/*
223 + * read the cache signature block from the cache device
224 + */
225 +static int afs_cache_read_sig(afs_cache_t *cache)
226 +{
227 +       struct afs_cache_super_block *csb;
228 +       struct buffer_head *bh;
229 +
230 +       bh = __bread(cache->bdev,0,PAGE_CACHE_SIZE);
231 +       if (!bh)
232 +               return -EIO;
233 +
234 +       csb = (struct afs_cache_super_block*) bh->b_data;
235 +
236 +       /* validate the cache superblock */
237 +       if (memcmp(csb->magic,AFS_CACHE_SUPER_MAGIC,sizeof(csb->magic))!=0) {
238 +               printk("kAFS cache magic string doesn't match\n");
239 +               return -EINVAL;
240 +       }
241 +       if (csb->endian!=AFS_CACHE_SUPER_ENDIAN) {
242 +               printk("kAFS endian spec doesn't match (%hx not %hx)\n",
243 +                      csb->endian,AFS_CACHE_SUPER_ENDIAN);
244 +               return -EINVAL;
245 +       }
246 +       if (csb->version!=AFS_CACHE_SUPER_VERSION) {
247 +               printk("kAFS version doesn't match (%u not %u)\n",
248 +                      csb->version,AFS_CACHE_SUPER_VERSION);
249 +               return -EINVAL;
250 +       }
251 +
252 +       /* copy the layout into the cache management structure */
253 +       memcpy(&cache->layout,csb,sizeof(cache->layout));
254 +
255 +       brelse(bh);
256 +       return 0;
257 +} /* end afs_cache_read_sig() */
258 +
259 +/*****************************************************************************/
260 +/*
261 + * update part of one page in the cache
262 + * - the caller must hold any required protective locks
263 + * - based on rw_swap_page_base()
264 + */
265 +static int afs_cache_update_region(afs_cache_t *cache, afs_cache_bix_t bix,
266 +                                  unsigned off, size_t size, void *buf)
267 +{
268 +       mm_segment_t oldfs;
269 +       loff_t pos;
270 +       int ret;
271 +
272 +       _enter("%s,%u,%u,%u,",kdevname(cache->dev),bix,off,size);
273 +
274 +       pos = bix*cache->layout.bsize + off;
275 +
276 +       oldfs = get_fs();
277 +       set_fs(KERNEL_DS);
278 +       ret = generic_file_write(cache->bdfile,buf,size,&pos);
279 +       set_fs(oldfs);
280 +
281 +       if (ret>0)
282 +               ret = 0;
283 +
284 +       _leave(" = %d",ret);
285 +       return ret;
286 +} /* end afs_cache_update_region() */
287 +
288 +/*****************************************************************************/
289 +/*
290 + * look up cell information in the cache
291 + * - mkafscache preloads /etc/sysconfig/kafs/cell-serv-db into the cache
292 + */
293 +int afs_cache_lookup_cell(afs_cache_t *cache,
294 +                         afs_cell_t *cell)
295 +{
296 +       struct afs_cache_cell_block *ccells;
297 +       struct afs_cache_cell *ccell;
298 +       struct buffer_head *bh;
299 +       afs_cache_cellix_t cix, stop, rem;
300 +       afs_cache_bix_t bix;
301 +       int loop;
302 +
303 +       _enter("%s,%s",kdevname(cache->dev),cell->name);
304 +
305 +       BUG();
306 +
307 +       rem = cache->layout.ncells;
308 +
309 +       for (bix=cache->layout.off_cell_cache; bix<cache->layout.off_volume_bitmap; bix++) {
310 +               /* read the next block */
311 +               bh = __bread(cache->bdev,bix,PAGE_CACHE_SIZE);
312 +               if (!bh) {
313 +                       kleave(" = -EIO (block %u)",bix);
314 +                       return -EIO;
315 +               }
316 +
317 +               ccells = (struct afs_cache_cell_block*) bh->b_data;
318 +
319 +               /* and scan it */
320 +               stop = min((size_t)rem,
321 +                          sizeof(struct afs_cache_cell_block)/sizeof(struct afs_cache_cell));
322 +               rem -= stop;
323 +
324 +               for (cix=0; cix<stop; cix++) {
325 +                       ccell = &ccells->entries[cix];
326 +                       if (strncmp(cell->name,ccell->name,sizeof(ccell->name))==0)
327 +                               goto found;
328 +               }
329 +
330 +               brelse(bh);
331 +       }
332 +
333 +       _leave(" = -ENOENT");
334 +       return -ENOENT;
335 +
336 + found:
337 +       /* found the cell record - copy out the details */
338 +       bix -= cache->layout.off_cell_cache;
339 +       cell->cache_ix = cix;
340 +       cell->cache_ix += bix * sizeof(struct afs_cache_cell_block)/sizeof(struct afs_cache_cell);
341 +
342 +       memcpy(cell->vl_addrs,ccell->servers,sizeof(cell->vl_addrs));
343 +
344 +       for (loop=0; loop<sizeof(cell->vl_addrs)/sizeof(cell->vl_addrs[0]); loop++)
345 +               if (!cell->vl_addrs[loop].s_addr)
346 +                       break;
347 +       cell->vl_naddrs = loop;
348 +
349 +       brelse(bh);
350 +       _leave(" = 0 (bix=%u cix=%u ccix=%u)",bix,cix,cell->cache_ix);
351 +       return 0;
352 +
353 +} /* end afs_cache_lookup_cell() */
354 +
355 +/*****************************************************************************/
356 +/*
357 + * search for a volume location record in the cache
358 + */
359 +int afs_cache_lookup_vlocation(afs_vlocation_t *vlocation)
360 +{
361 +       BUG();
362 +
363 +#if 0
364 +       struct afs_cache_volume_block *cvols;
365 +       struct afs_cache_volume *cvol;
366 +       struct buffer_head *bh;
367 +       afs_cache_bix_t bix;
368 +       unsigned rem, stop, ix;
369 +
370 +       _enter("%s,{v=%s cix=%u}",
371 +              kdevname(vlocation->cache->dev),vlocation->vldb.name,vlocation->vldb.cell_ix);
372 +
373 +       rem = vlocation->cache->layout.nvols;
374 +
375 +       for (bix=vlocation->cache->layout.off_volume_cache;
376 +            bix<vlocation->cache->layout.off_vnode_bitmap;
377 +            bix++
378 +            ) {
379 +               /* read the next block */
380 +               bh = __bread(vlocation->cache->bdev,bix,PAGE_CACHE_SIZE);
381 +               if (!bh) {
382 +                       kleave(" = -EIO (block %u)",bix);
383 +                       return -EIO;
384 +               }
385 +
386 +               cvols = (struct afs_cache_volume_block*) bh->b_data;
387 +
388 +               /* and scan it */
389 +               stop = min((size_t)rem,sizeof(*cvols)/sizeof(*cvol));
390 +               rem -= stop;
391 +
392 +               for (ix=0; ix<stop; ix++) {
393 +                       cvol = &cvols->entries[ix];
394 +                       if (cvol->name[0])
395 +                               _debug("FOUND[%u.%u]: cell %u vol '%s' %08x",
396 +                                      bix,ix,cvol->cell_ix,cvol->name,cvol->vid[0]);
397 +                       if (cvol->cell_ix==vlocation->vldb.cell_ix &&
398 +                           memcmp(vlocation->vldb.name,cvol->name,sizeof(cvol->name))==0) {
399 +                               goto found;
400 +                       }
401 +               }
402 +
403 +               brelse(bh);
404 +       }
405 +
406 +       _leave(" = %d",-ENOENT);
407 +       return -ENOENT;
408 +
409 + found:
410 +       /* found the cell record */
411 +       memcpy(&vlocation->vldb,cvol,sizeof(*cvol));
412 +       brelse(bh);
413 +
414 +       /* note the volume ID */
415 +       bix -= vlocation->cache->layout.off_volume_cache;
416 +       vlocation->vix.index = (ix + bix * (sizeof(*cvols)/sizeof(*cvol))) << 2;
417 +
418 +       _leave(" = 0 (bix=%u ix=%u vix=%hu)",bix,ix,vlocation->vix.index);
419 +#endif
420 +       return 0;
421 +
422 +} /* end afs_cache_lookup_vlocation() */
423 +
424 +/*****************************************************************************/
425 +/*
426 + * search for a volume location record in the cache, and if one's not available then reap the
427 + * eldest not currently in use
428 + */
429 +int afs_cache_update_vlocation(afs_vlocation_t *vlocation)
430 +{
431 +       BUG();
432 +
433 +#if 0
434 +       struct afs_cache_volume_block *cvols;
435 +       struct afs_cache_volume *cvol;
436 +       struct buffer_head *bh;
437 +       afs_cache_bix_t bix;
438 +       unsigned rem, stop, ix, candidate, tmp;
439 +       time_t cand_age;
440 +       int ret;
441 +
442 +
443 +       _enter("%s,{v=%s cix=%u}",
444 +              kdevname(vlocation->cache->dev),vlocation->vldb.name,vlocation->vldb.cell_ix);
445 +
446 +       candidate = UINT_MAX;
447 +       cand_age = ULONG_MAX;
448 +       rem = vlocation->cache->layout.nvols;
449 +
450 +       for (bix=vlocation->cache->layout.off_volume_cache;
451 +            bix<vlocation->cache->layout.off_vnode_bitmap;
452 +            bix++
453 +            ) {
454 +               /* read the next block */
455 +               bh = __bread(vlocation->cache->bdev,bix,PAGE_CACHE_SIZE);
456 +               if (!bh) {
457 +                       kleave(" = -EIO (block %u)",bix);
458 +                       return -EIO;
459 +               }
460 +
461 +               cvols = (struct afs_cache_volume_block*) bh->b_data;
462 +
463 +               /* and scan it */
464 +               stop = min((size_t)rem,sizeof(*cvols)/sizeof(*cvol));
465 +               rem -= stop;
466 +
467 +               for (ix=0; ix<stop; ix++) {
468 +                       cvol = &cvols->entries[ix];
469 +                       if (cvol->name[0])
470 +                               _debug("FOUND[%u.%u]: cell %u vol '%s' %08x",
471 +                                      bix,ix,cvol->cell_ix,cvol->name,cvol->vid[0]);
472 +                       if (cvol->cell_ix==vlocation->vldb.cell_ix &&
473 +                           memcmp(vlocation->vldb.name,cvol->name,sizeof(cvol->name))==0) {
474 +                               goto found;
475 +                       }
476 +
477 +                       if (candidate!=UINT_MAX && cvol->ctime<cand_age) {
478 +                               /* TODO: don't recycle volumes currently in use */
479 +                               cand_age = cvol->ctime;
480 +                               candidate = bix - vlocation->cache->layout.off_volume_cache;
481 +                               candidate = ix + candidate * sizeof(*cvols)/sizeof(*cvol);
482 +                       }
483 +               }
484 +
485 +               brelse(bh);
486 +       }
487 +
488 +       /* TODO: recycle old entry if no spare slots available */
489 +       if (vlocation->cache->layout.nvols>=vlocation->cache->layout.maxvols)
490 +               BUG();
491 +
492 +       /* insert new entry */
493 +       ix = vlocation->vix.index = vlocation->cache->layout.nvols++;
494 +       tmp = (sizeof(*cvols)/sizeof(*cvol));
495 +       bix = ix / tmp + vlocation->cache->layout.off_volume_cache;
496 +       ix %= tmp;
497 +
498 +       kdebug("INSERT (bix=%u ix=%u)",bix,ix);
499 +       ret = afs_cache_update_region(vlocation->cache,
500 +                                     bix,
501 +                                     ix*sizeof(*cvol),
502 +                                     sizeof(*cvol),
503 +                                     &vlocation->vldb);
504 +       if (ret<0)
505 +               goto out;
506 +
507 +       /* update the superblock */
508 +       ret = afs_cache_update_region(vlocation->cache,
509 +                                     0,0,
510 +                                     sizeof(vlocation->cache->layout),
511 +                                     &vlocation->cache->layout);
512 +
513 +       /* TODO: handle failure by winding back cache->layout.nvols */
514 +
515 + out:
516 +       _leave(" = %d (bix=%u ix=%u vix=%hu)",ret,bix,ix,vlocation->vix.index);
517 +       return ret;
518 +
519 + found:
520 +       brelse(bh);
521 +
522 +       /* update the on-disk cache with the latest news */
523 +       _debug("UPDATE (bix=%u ix=%u)",bix,ix);
524 +       ret = afs_cache_update_region(vlocation->cache,
525 +                                     bix,
526 +                                     ix*sizeof(*cvol),
527 +                                     sizeof(*cvol),
528 +                                     &vlocation->vldb);
529 +       if (ret<0)
530 +               goto out;
531 +
532 +       /* found the cell record - note the volume ID */
533 +       bix -= vlocation->cache->layout.off_volume_cache;
534 +       vlocation->vix.index = (ix + bix * (sizeof(*cvols)/sizeof(*cvol))) << 2;
535 +
536 +       _leave(" = 0 (bix=%u ix=%u vix=%hu)",bix,ix,vlocation->vix.index);
537 +#endif
538 +       return 0;
539 +
540 +} /* end afs_cache_update_vlocation() */
541 +
542 +/*****************************************************************************/
543 +/*
544 + * search for a vnode record in the cache, and if one's not available then reap the
545 + * eldest not currently in use
546 + */
547 +int afs_cache_lookup_vnode(afs_volume_t *volume, afs_vnode_t *vnode)
548 +{
549 +       BUG();
550 +
551 +#if 0
552 +       struct afs_cache_vnode_index_block *cindexb;
553 +       struct afs_cache_vnode_index cindex;
554 +       struct buffer_head *bh;
555 +       afs_cache_bix_t bix;
556 +       unsigned rem, stop, ix, candidate, tmp;
557 +       time_t cand_age;
558 +       int ret;
559 +
560 +       _enter("{cix=%u vix=%u},{%u,%u,%u}",
561 +              volume->cix,volume->vix.index,vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique);
562 +
563 +       candidate = UINT_MAX;
564 +       cand_age = ULONG_MAX;
565 +       rem = volume->cache->layout.nvnodes;
566 +
567 +       for (bix=volume->cache->layout.off_vnode_index;
568 +            bix<volume->cache->layout.off_vnode_cache;
569 +            bix++
570 +            ) {
571 +               /* read the next block */
572 +               bh = __bread(volume->cache->bdev,bix,PAGE_CACHE_SIZE);
573 +               if (!bh) {
574 +                       kleave(" = -EIO (block %u)",bix);
575 +                       return -EIO;
576 +               }
577 +
578 +               cindexb = (struct afs_cache_vnode_index_block*) bh->b_data;
579 +
580 +               /* and scan it */
581 +               stop = min((size_t)rem,AFS_CACHE_VNODE_INDEX_PER_BLOCK);
582 +               rem -= stop;
583 +
584 +               for (ix=0; ix<stop; ix++) {
585 +                       memcpy(&cindex,&cindexb->index[ix],sizeof(cindex));
586 +
587 +#if 0
588 +                       if (cindex.vnode>0)
589 +                               kdebug("FOUND[%u.%u]: vix %u vnode %u",
590 +                                      bix,ix,cindex.volume_ix.index,cindex.vnode);
591 +#endif
592 +
593 +                       if (cindex.vnode==vnode->fid.vnode &&
594 +                           cindex.volume_ix.index==volume->vix.index)
595 +                               goto found;
596 +
597 +                       if (candidate!=UINT_MAX && cindex.atime<cand_age) {
598 +                               /* TODO: don't recycle volumes currently in use */
599 +                               cand_age = cindex.atime;
600 +                               candidate = bix - volume->cache->layout.off_vnode_index;
601 +                               candidate = ix + candidate * AFS_CACHE_VNODE_INDEX_PER_BLOCK;
602 +                       }
603 +               }
604 +
605 +               brelse(bh);
606 +       }
607 +
608 +       /* TODO: recycle old entry if no spare slots available */
609 +       if (volume->cache->layout.nvnodes>=volume->cache->layout.maxvnodes)
610 +               BUG();
611 +
612 +       /* append new entry */
613 +       vnode->nix = volume->cache->layout.nvnodes++;
614 +
615 +       cindex.vnode = vnode->fid.vnode;
616 +       cindex.atime = xtime.tv_sec;
617 +       cindex.volume_ix = volume->vix;
618 +
619 +       ix = vnode->nix;
620 +       tmp = AFS_CACHE_VNODE_INDEX_PER_BLOCK;
621 +       bix = ix / tmp + volume->cache->layout.off_vnode_index;
622 +       ix %= tmp;
623 +
624 +       _debug("CACHE APPEND VNODE %u (bix=%u ix=%u)",vnode->nix,bix,ix);
625 +       ret = afs_cache_update_region(volume->cache,
626 +                                     bix,
627 +                                     ix*sizeof(cindex),
628 +                                     sizeof(cindex),
629 +                                     &cindex);
630 +       if (ret<0)
631 +               goto out;
632 +
633 +       /* update the superblock */
634 +       ret = afs_cache_update_region(volume->cache,
635 +                                     0,0,
636 +                                     sizeof(volume->cache->layout),
637 +                                     &volume->cache->layout);
638 +
639 +       /* TODO: handle failure by winding back cache->layout.nvnodes */
640 +
641 + out:
642 +       _leave(" = %d (bix=%u ix=%u nix=%u)",ret,bix,ix,vnode->nix);
643 +       return ret;
644 +
645 + found:
646 +       brelse(bh);
647 +
648 +       cindex.atime = xtime.tv_sec;
649 +
650 +       /* update the on-disk cache with the latest news */
651 +       _debug("UPDATE (bix=%u ix=%u)",bix,ix);
652 +       ret = afs_cache_update_region(volume->cache,
653 +                                     bix,
654 +                                     ix*sizeof(cindex),
655 +                                     sizeof(cindex),
656 +                                     &cindex);
657 +       if (ret<0)
658 +               goto out;
659 +
660 +       /* found the cell record - note the volume ID */
661 +       bix -= volume->cache->layout.off_vnode_index;
662 +       vnode->nix = ix + bix * AFS_CACHE_VNODE_INDEX_PER_BLOCK;
663 +
664 +       _leave(" = 0 (bix=%u ix=%u nix=%u)",bix,ix,vnode->nix);
665 +#endif
666 +       return 0;
667 +
668 +} /* end afs_cache_lookup_vnode() */
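
For reference, the lookup routines above linearise a (block, slot) pair into a single cache index.  A minimal sketch of the cell-index calculation from afs_cache_lookup_cell(), with the entries-per-block count passed in rather than derived from the kernel structures (the helper name is made up):

/* Sketch only, not part of the patch: mirrors the cache_ix arithmetic in
 * afs_cache_lookup_cell().  cells_per_block stands in for
 * sizeof(struct afs_cache_cell_block) / sizeof(struct afs_cache_cell).
 */
static unsigned afs_cell_index(unsigned bix,            /* absolute block index */
                               unsigned off_cell_cache, /* layout.off_cell_cache */
                               unsigned cix,            /* slot within that block */
                               unsigned cells_per_block)
{
        /* count blocks from the start of the cell cache region, then
         * linearise (block, slot) into one index */
        return cix + (bix - off_cell_cache) * cells_per_block;
}
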
669 diff -urNp linux-5240/fs/afs/cache.h linux-5250/fs/afs/cache.h
670 --- linux-5240/fs/afs/cache.h   1970-01-01 01:00:00.000000000 +0100
671 +++ linux-5250/fs/afs/cache.h   
672 @@ -0,0 +1,48 @@
673 +/* cache.h: AFS local cache management
674 + *
675 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
676 + * Written by David Howells (dhowells@redhat.com)
677 + *
678 + * This program is free software; you can redistribute it and/or
679 + * modify it under the terms of the GNU General Public License
680 + * as published by the Free Software Foundation; either version
681 + * 2 of the License, or (at your option) any later version.
682 + */
683 +
684 +#ifndef _LINUX_AFS_CACHE_H
685 +#define _LINUX_AFS_CACHE_H
686 +
687 +#include <linux/fs.h>
688 +#include "cache-layout.h"
689 +
690 +#ifdef __KERNEL__
691 +
692 +/*****************************************************************************/
693 +/*
694 + * AFS cache management record
695 + */
696 +struct afs_cache
697 +{
698 +       atomic_t                        usage;          /* usage count */
699 +       struct list_head                link;           /* link in cache list */
700 +       kdev_t                          dev;            /* device numbers */
701 +       struct block_device             *bdev;          /* block device */
702 +       struct file                     *bdfile;        /* file attached to block device */
703 +       struct rw_semaphore             sem;            /* access semaphore */
704 +       struct afs_cache_super_block    layout;         /* layout description */
705 +};
706 +
707 +extern int afs_cache_open(const char *name, afs_cache_t **_cache);
708 +
709 +#define afs_get_cache(C) do { atomic_inc(&(C)->usage); } while(0)
710 +
711 +extern void afs_put_cache(afs_cache_t *cache);
712 +
713 +extern int afs_cache_lookup_cell(afs_cache_t *cache, afs_cell_t *cell);
714 +extern int afs_cache_lookup_vlocation(afs_vlocation_t *vlocation);
715 +extern int afs_cache_update_vlocation(afs_vlocation_t *vlocation);
716 +extern int afs_cache_lookup_vnode(afs_volume_t *volume, afs_vnode_t *vnode);
717 +
718 +#endif /* __KERNEL__ */
719 +
720 +#endif /* _LINUX_AFS_CACHE_H */
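
A hedged usage sketch of the interface declared above, assuming the calling convention implied by cache.c (afs_cache_open() returns a record with one reference held, afs_get_cache() takes an extra reference, afs_put_cache() drops one); the device path is illustrative:

/* Sketch only, not part of the patch: expected afs_cache_t reference pattern. */
#include "cache.h"

static int example_cache_user(void)
{
        afs_cache_t *cache;
        int ret;

        ret = afs_cache_open("/dev/hdc1", &cache);  /* one reference on success */
        if (ret < 0)
                return ret;

        afs_get_cache(cache);                       /* extra ref for a second user */
        /* ... both users work with the cache ... */
        afs_put_cache(cache);                       /* second user done */

        afs_put_cache(cache);                       /* drop the open() reference */
        return 0;
}
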
721 diff -urNp linux-5240/fs/afs/cache-layout.h linux-5250/fs/afs/cache-layout.h
722 --- linux-5240/fs/afs/cache-layout.h    1970-01-01 01:00:00.000000000 +0100
723 +++ linux-5250/fs/afs/cache-layout.h    
724 @@ -0,0 +1,231 @@
725 +/* cache-layout.h: AFS cache layout
726 + *
727 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
728 + * Written by David Howells (dhowells@redhat.com)
729 + *
730 + * This program is free software; you can redistribute it and/or
731 + * modify it under the terms of the GNU General Public License
732 + * as published by the Free Software Foundation; either version
733 + * 2 of the License, or (at your option) any later version.
734 + *
735 + *
736 + * The cache is stored on a block device and is laid out as follows:
737 + *
738 + *  0  +------------------------------------------------
739 + *     |
740 + *     |  SuperBlock
741 + *     |
742 + *  1  +------------------------------------------------
743 + *     |
744 + *     |  Cell Cache (preloaded by mkafscache)
745 + *     |
746 + *     +------------------------------------------------
747 + *     |
748 + *     |  Volume Cache Allocation BitMap (1 page)
749 + *     |
750 + *     +------------------------------------------------
751 + *     |
752 + *     |  Volume Cache
753 + *     |
754 + *     +------------------------------------------------
755 + *     |
756 + *     |  Vnode Cache Allocation BitMap
757 + *     |
758 + *     +------------------------------------------------
759 + *     |
760 + *     |  Vnode Cache Index
761 + *     |
762 + *     +------------------------------------------------
763 + *     |
764 + *     |  Vnode Cache
765 + *     |
766 + *     +------------------------------------------------
767 + *     |
768 + *     |  Data Cache Allocation BitMap
769 + *     |
770 + *     +------------------------------------------------
771 + *     |
772 + *     |  Data Cache
773 + *     |
774 + *  End        +------------------------------------------------
775 + *
776 + */
777 +
778 +#ifndef _LINUX_AFS_CACHE_LAYOUT_H
779 +#define _LINUX_AFS_CACHE_LAYOUT_H
780 +
781 +#include "types.h"
782 +
783 +typedef unsigned afs_cache_bix_t;
784 +typedef unsigned short afs_cache_cellix_t;
785 +
786 +typedef struct { unsigned short index; } afs_cache_volix_t;
787 +
788 +/*****************************************************************************/
789 +/*
790 + * cache superblock block layout
791 + */
792 +struct afs_cache_super_block
793 +{
794 +       char                    magic[10];      /* magic number */
795 +#define AFS_CACHE_SUPER_MAGIC "kafscache"
796 +
797 +       unsigned short          endian;         /* 0x1234 stored CPU-normal order */
798 +#define AFS_CACHE_SUPER_ENDIAN 0x1234
799 +
800 +       unsigned                version;        /* format version */
801 +#define AFS_CACHE_SUPER_VERSION 1
802 +
803 +       /* accounting */
804 +       afs_cache_cellix_t      ncells;         /* number of cells cached */
805 +       afs_cache_cellix_t      maxcells;       /* max number of cells cacheable */
806 +       afs_cache_cellix_t      thiscell;       /* index of this cell in cache */
807 +       unsigned short          nvols;          /* volume cache usage */
808 +       unsigned short          maxvols;        /* maximum number of volumes cacheable */
809 +       unsigned                nvnodes;        /* vnode cache usage */
810 +       unsigned                maxvnodes;      /* maximum number of vnodes cacheable */
811 +
812 +       /* layout */
813 +       unsigned                bsize;                  /* cache block size */
814 +       afs_cache_bix_t         off_cell_cache;         /* block offset of cell cache */
815 +       afs_cache_bix_t         off_volume_bitmap;      /* block offset of volume alloc bitmap */
816 +       afs_cache_bix_t         off_volume_cache;       /* block offset of volume cache */
817 +       afs_cache_bix_t         off_vnode_bitmap;       /* block offset of vnode alloc bitmap */
818 +       afs_cache_bix_t         off_vnode_index;        /* block offset of vnode index */
819 +       afs_cache_bix_t         off_vnode_cache;        /* block offset of vnode cache */
820 +       afs_cache_bix_t         off_data_bitmap;        /* block offset of data bitmap */
821 +       afs_cache_bix_t         off_data_cache;         /* block offset of data cache */
822 +       afs_cache_bix_t         off_end;                /* block offset of end of cache */
823 +};
824 +
825 +/*****************************************************************************/
826 +/*
827 + * cached cell info
828 + */
829 +struct afs_cache_cell
830 +{
831 +       char                    name[64];       /* cell name (padded with NULs) */
832 +       struct in_addr          servers[16];    /* cached cell servers */
833 +};
834 +
835 +struct afs_cache_cell_block
836 +{
837 +       struct afs_cache_cell entries[PAGE_SIZE/sizeof(struct afs_cache_cell)];
838 +};
839 +
840 +/*****************************************************************************/
841 +/*
842 + * cached volume info
843 + * - indexed by (afs_cache_volix_t/4)
844 + * - (afs_cache_volix_t%4) is 0 for R/W, 1 for R/O and 2 for Bak (3 is not used)
845 + */
846 +struct afs_cache_volume
847 +{
848 +       char                    name[64];       /* volume name (padded with NULs) */
849 +       afs_volid_t             vid[3];         /* volume IDs for R/W, R/O and Bak volumes */
850 +       unsigned char           vidmask;        /* voltype mask for vid[] */
851 +       unsigned char           _pad[1];
852 +       unsigned short          nservers;       /* number of entries used in servers[] */
853 +       struct in_addr          servers[8];     /* fileserver addresses */
854 +       unsigned char           srvtmask[8];    /* voltype masks for servers[] */
855 +#define AFS_CACHE_VOL_STM_RW   0x01 /* server holds a R/W version of the volume */
856 +#define AFS_CACHE_VOL_STM_RO   0x02 /* server holds a R/O version of the volume */
857 +#define AFS_CACHE_VOL_STM_BAK  0x04 /* server holds a backup version of the volume */
858 +
859 +       afs_cache_cellix_t      cell_ix;        /* cell cache index (MAX_UINT if unused) */
860 +       time_t                  ctime;          /* time at which cached */
861 +};
862 +
863 +struct afs_cache_volume_block
864 +{
865 +       struct afs_cache_volume entries[PAGE_SIZE/sizeof(struct afs_cache_volume)];
866 +};
867 +
868 +/*****************************************************************************/
869 +/*
870 + * cached vnode index
871 + * - map on a 1:1 basis with the vnode index table
872 + */
873 +struct afs_cache_vnode_index
874 +{
875 +       afs_vnodeid_t           vnode;          /* vnode ID */
876 +       time_t                  atime;          /* last time accessed */
877 +       afs_cache_volix_t       volume_ix;      /* volume cache index */
878 +} __attribute__((packed));
879 +
880 +#define AFS_CACHE_VNODE_INDEX_PER_BLOCK ((size_t)(PAGE_SIZE/sizeof(struct afs_cache_vnode_index)))
881 +
882 +struct afs_cache_vnode_index_block
883 +{
884 +       struct afs_cache_vnode_index index[AFS_CACHE_VNODE_INDEX_PER_BLOCK];
885 +};
886 +
887 +/*****************************************************************************/
888 +/*
889 + * cached vnode rights entry
890 + */
891 +struct afs_cache_rights
892 +{
893 +       uid_t                   uid;
894 +       unsigned                access;
895 +       unsigned short          mode;
896 +} __attribute__((packed));
897 +
898 +/*****************************************************************************/
899 +/*
900 + * vnode (inode) metadata cache
901 + * - PAGE_SIZE in size
902 + */
903 +struct afs_cache_vnode_block
904 +{
905 +       /* file ID */
906 +       unsigned                unique;         /* FID unique */
907 +
908 +       /* file status */
909 +       afs_file_type_t         type;           /* file type */
910 +       unsigned                nlink;          /* link count */
911 +       size_t                  size;           /* file size */
912 +       afs_dataversion_t       version;        /* current data version */
913 +       unsigned                author;         /* author ID */
914 +       unsigned                owner;          /* owner ID */
915 +       unsigned                anon_access;    /* access rights for unauthenticated caller */
916 +       unsigned short          mode;           /* UNIX mode */
917 +       time_t                  mtime;          /* last time server changed data */
918 +       time_t                  cachetime;      /* time at which cached */
919 +
920 +       /* file contents */
921 +       afs_cache_bix_t         pt0_bix;        /* "page table 0" block index */
922 +       afs_cache_bix_t         pgd_bix;        /* "page directory" block index */
923 +
924 +       /* access rights */
925 +       size_t                  nrights;        /* number of cached rights */
926 +       struct afs_cache_rights rights[0];      /* cached access rights buffer */
927 +};
928 +
929 +#define AFS_CACHE_VNODE_MAXRIGHTS \
930 +       ((PAGE_SIZE - sizeof(struct afs_cache_vnode_block)) / sizeof(struct afs_cache_rights))
931 +
932 +/*****************************************************************************/
933 +/*
934 + * vnode data "page directory" block
935 + * - first 1024 pages don't map through here
936 + * - PAGE_SIZE in size
937 + */
938 +struct afs_cache_pgd_block
939 +{
940 +       unsigned                _unused;
941 +       afs_cache_bix_t         pt_bix[1023];   /* "page table" block indices */
942 +};
943 +
944 +/*****************************************************************************/
945 +/*
946 + * vnode data "page table" block
947 + * - PAGE_SIZE in size
948 + */
949 +struct afs_cache_pt_block
950 +{
951 +       afs_cache_bix_t         page_bix[1024]; /* "page" block indices */
952 +};
953 +
954 +
955 +#endif /* _LINUX_AFS_CACHE_LAYOUT_H */
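
Two pieces of arithmetic implied by the layout above, shown as a sketch (the fields are from struct afs_cache_super_block and the volume-index encoding follows the comment on struct afs_cache_volume; the helper names are made up):

/* Sketch only, not part of the patch. */

/* byte position of (block bix, offset off), as computed by afs_cache_update_region() */
static unsigned long long cache_byte_pos(unsigned bsize, unsigned bix, unsigned off)
{
        return (unsigned long long) bix * bsize + off;
}

/* afs_cache_volix_t packs the entry number above the volume type:
 * index/4 selects the cached volume entry, index%4 is 0 for R/W, 1 for R/O,
 * 2 for Bak (3 unused) */
static unsigned short make_volix(unsigned short entry, unsigned short voltype)
{
        return (entry << 2) | (voltype & 3);
}

static unsigned short volix_entry(unsigned short volix)   { return volix >> 2; }
static unsigned short volix_voltype(unsigned short volix) { return volix & 3; }
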
956 diff -urNp linux-5240/fs/afs/callback.c linux-5250/fs/afs/callback.c
957 --- linux-5240/fs/afs/callback.c        1970-01-01 01:00:00.000000000 +0100
958 +++ linux-5250/fs/afs/callback.c        
959 @@ -0,0 +1,166 @@
960 +/*
961 + * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
962 + *
963 + * This software may be freely redistributed under the terms of the
964 + * GNU General Public License.
965 + *
966 + * You should have received a copy of the GNU General Public License
967 + * along with this program; if not, write to the Free Software
968 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
969 + *
970 + * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
971 + *          David Howells <dhowells@redhat.com>
972 + *
973 + */
974 +
975 +#include <linux/kernel.h>
976 +#include <linux/module.h>
977 +#include <linux/init.h>
978 +#include "server.h"
979 +#include "vnode.h"
980 +#include "internal.h"
981 +
982 +/*****************************************************************************/
983 +/*
984 + * allow the fileserver to request callback state (re-)initialisation
985 + */
986 +int SRXAFSCM_InitCallBackState(afs_server_t *server)
987 +{
988 +       struct list_head callbacks;
989 +
990 +       _enter("%p",server);
991 +
992 +       INIT_LIST_HEAD(&callbacks);
993 +
994 +       /* transfer the callback list from the server to a temp holding area */
995 +       spin_lock(&server->cb_lock);
996 +
997 +       list_add(&callbacks,&server->cb_promises);
998 +       list_del_init(&server->cb_promises);
999 +
1000 +       /* munch our way through the list, grabbing the inode, dropping all the locks and regetting
1001 +        * them in the right order
1002 +        */
1003 +       while (!list_empty(&callbacks)) {
1004 +               struct inode *inode;
1005 +               afs_vnode_t *vnode;
1006 +
1007 +               vnode = list_entry(callbacks.next,afs_vnode_t,cb_link);
1008 +               list_del_init(&vnode->cb_link);
1009 +
1010 +               /* try and grab the inode - may fail */
1011 +               inode = igrab(AFS_VNODE_TO_I(vnode));
1012 +               if (inode) {
1013 +                       int release = 0;
1014 +
1015 +                       spin_unlock(&server->cb_lock);
1016 +                       spin_lock(&vnode->lock);
1017 +
1018 +                       if (cmpxchg(&vnode->cb_server,server,NULL)==server) {
1019 +                               afs_kafstimod_del_timer(&vnode->cb_timeout);
1020 +                               spin_lock(&afs_cb_hash_lock);
1021 +                               list_del_init(&vnode->cb_hash_link);
1022 +                               spin_unlock(&afs_cb_hash_lock);
1023 +                               release = 1;
1024 +                       }
1025 +
1026 +                       spin_unlock(&vnode->lock);
1027 +
1028 +                       iput(inode);
1029 +                       if (release) afs_put_server(server);
1030 +
1031 +                       spin_lock(&server->cb_lock);
1032 +               }
1033 +       }
1034 +
1035 +       spin_unlock(&server->cb_lock);
1036 +
1037 +       _leave(" = 0");
1038 +       return 0;
1039 +} /* end SRXAFSCM_InitCallBackState() */
1040 +
1041 +/*****************************************************************************/
1042 +/*
1043 + * allow the fileserver to break callback promises
1044 + */
1045 +int SRXAFSCM_CallBack(afs_server_t *server, size_t count, afs_callback_t callbacks[])
1046 +{
1047 +       struct list_head *_p;
1048 +
1049 +       _enter("%p,%u,",server,count);
1050 +
1051 +       for (; count>0; callbacks++, count--) {
1052 +               struct inode *inode = NULL;
1053 +               afs_vnode_t *vnode = NULL;
1054 +               int valid = 0;
1055 +
1056 +               _debug("- Fid { vl=%08x n=%u u=%u }  CB { v=%u x=%u t=%u }",
1057 +                      callbacks->fid.vid,
1058 +                      callbacks->fid.vnode,
1059 +                      callbacks->fid.unique,
1060 +                      callbacks->version,
1061 +                      callbacks->expiry,
1062 +                      callbacks->type
1063 +                      );
1064 +
1065 +               /* find the inode for this fid */
1066 +               spin_lock(&afs_cb_hash_lock);
1067 +
1068 +               list_for_each(_p,&afs_cb_hash(server,&callbacks->fid)) {
1069 +                       vnode = list_entry(_p,afs_vnode_t,cb_hash_link);
1070 +
1071 +                       if (memcmp(&vnode->fid,&callbacks->fid,sizeof(afs_fid_t))!=0)
1072 +                               continue;
1073 +
1074 +                       /* right vnode, but is it same server? */
1075 +                       if (vnode->cb_server!=server)
1076 +                               break; /* no */
1077 +
1078 +                       /* try and nail the inode down */
1079 +                       inode = igrab(AFS_VNODE_TO_I(vnode));
1080 +                       break;
1081 +               }
1082 +
1083 +               spin_unlock(&afs_cb_hash_lock);
1084 +
1085 +               if (inode) {
1086 +                       /* we've found the record for this vnode */
1087 +                       spin_lock(&vnode->lock);
1088 +                       if (cmpxchg(&vnode->cb_server,server,NULL)==server) {
1089 +                               /* the callback _is_ on the calling server */
1090 +                               valid = 1;
1091 +
1092 +                               afs_kafstimod_del_timer(&vnode->cb_timeout);
1093 +                               vnode->flags |= AFS_VNODE_CHANGED;
1094 +
1095 +                               spin_lock(&server->cb_lock);
1096 +                               list_del_init(&vnode->cb_link);
1097 +                               spin_unlock(&server->cb_lock);
1098 +
1099 +                               spin_lock(&afs_cb_hash_lock);
1100 +                               list_del_init(&vnode->cb_hash_link);
1101 +                               spin_unlock(&afs_cb_hash_lock);
1102 +                       }
1103 +                       spin_unlock(&vnode->lock);
1104 +
1105 +                       if (valid) {
1106 +                               invalidate_inode_pages(inode);
1107 +                               afs_put_server(server);
1108 +                       }
1109 +                       iput(inode);
1110 +               }
1111 +       }
1112 +
1113 +       _leave(" = 0");
1114 +       return 0;
1115 +} /* end SRXAFSCM_CallBack() */
1116 +
1117 +/*****************************************************************************/
1118 +/*
1119 + * allow the fileserver to see if the cache manager is still alive
1120 + */
1121 +int SRXAFSCM_Probe(afs_server_t *server)
1122 +{
1123 +       _debug("SRXAFSCM_Probe(%p)\n",server);
1124 +       return 0;
1125 +} /* end SRXAFSCM_Probe() */
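
SRXAFSCM_InitCallBackState() above empties server->cb_promises by splicing the whole list onto a local head while cb_lock is held, then walks that local copy with the lock dropped and retaken around each inode.  A minimal sketch of the splice idiom in isolation (the function name is illustrative):

/* Sketch only, not part of the patch: move every entry from 'src' onto the
 * local head 'holding' in O(1), leaving 'src' empty. */
#include <linux/list.h>

static void steal_list(struct list_head *holding, struct list_head *src)
{
        INIT_LIST_HEAD(holding);
        list_add(holding, src);   /* insert the local head into the existing ring */
        list_del_init(src);       /* detach the old head; 'holding' now heads the list */
}
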
1126 diff -urNp linux-5240/fs/afs/cell.c linux-5250/fs/afs/cell.c
1127 --- linux-5240/fs/afs/cell.c    1970-01-01 01:00:00.000000000 +0100
1128 +++ linux-5250/fs/afs/cell.c    
1129 @@ -0,0 +1,452 @@
1130 +/* cell.c: AFS cell and server record management
1131 + *
1132 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
1133 + * Written by David Howells (dhowells@redhat.com)
1134 + *
1135 + * This program is free software; you can redistribute it and/or
1136 + * modify it under the terms of the GNU General Public License
1137 + * as published by the Free Software Foundation; either version
1138 + * 2 of the License, or (at your option) any later version.
1139 + */
1140 +
1141 +#include <linux/module.h>
1142 +#include <linux/sched.h>
1143 +#include <linux/slab.h>
1144 +#include <rxrpc/peer.h>
1145 +#include <rxrpc/connection.h>
1146 +#include "volume.h"
1147 +#include "cell.h"
1148 +#include "server.h"
1149 +#include "transport.h"
1150 +#include "cache.h"
1151 +#include "vlclient.h"
1152 +#include "kafstimod.h"
1153 +#include "super.h"
1154 +#include "internal.h"
1155 +
1156 +DECLARE_RWSEM(afs_proc_cells_sem);
1157 +LIST_HEAD(afs_proc_cells);
1158 +
1159 +static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells);
1160 +static rwlock_t afs_cells_lock = RW_LOCK_UNLOCKED;
1161 +static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */
1162 +static afs_cell_t *afs_cell_root;
1163 +
1164 +static char *rootcell;
1165 +
1166 +MODULE_PARM(rootcell,"s");
1167 +MODULE_PARM_DESC(rootcell,"root AFS cell name and VL server IP addr list");
1168 +
1169 +/*****************************************************************************/
1170 +/*
1171 + * create a cell record
1172 + * - "name" is the name of the cell
1173 + * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format
1174 + */
1175 +int afs_cell_create(const char *name, char *vllist, afs_cell_t **_cell)
1176 +{
1177 +       afs_cell_t *cell;
1178 +       char *next;
1179 +       int ret;
1180 +
1181 +       _enter("%s",name);
1182 +
1183 +       if (!name) BUG(); /* TODO: want to look up "this cell" in the cache */
1184 +
1185 +       down_write(&afs_cells_sem);
1186 +
1187 +       /* allocate and initialise a cell record */
1188 +       cell = kmalloc(sizeof(afs_cell_t) + strlen(name) + 1,GFP_KERNEL);
1189 +       if (!cell) {
1190 +               _leave(" = -ENOMEM");
1191 +               return -ENOMEM;
1192 +       }
1193 +
1194 +       memset(cell,0,sizeof(afs_cell_t));
1195 +       atomic_set(&cell->usage,0);
1196 +
1197 +       INIT_LIST_HEAD(&cell->link);
1198 +
1199 +       rwlock_init(&cell->sv_lock);
1200 +       INIT_LIST_HEAD(&cell->sv_list);
1201 +       INIT_LIST_HEAD(&cell->sv_graveyard);
1202 +       spin_lock_init(&cell->sv_gylock);
1203 +
1204 +       init_rwsem(&cell->vl_sem);
1205 +       INIT_LIST_HEAD(&cell->vl_list);
1206 +       INIT_LIST_HEAD(&cell->vl_graveyard);
1207 +       spin_lock_init(&cell->vl_gylock);
1208 +
1209 +       strcpy(cell->name,name);
1210 +
1211 +       /* fill in the VL server list from the rest of the string */
1212 +       ret = -EINVAL;
1213 +       do {
1214 +               unsigned a, b, c, d;
1215 +
1216 +               next = strchr(vllist,':');
1217 +               if (next) *next++ = 0;
1218 +
1219 +               if (sscanf(vllist,"%u.%u.%u.%u",&a,&b,&c,&d)!=4)
1220 +                       goto badaddr;
1221 +
1222 +               if (a>255 || b>255 || c>255 || d>255)
1223 +                       goto badaddr;
1224 +
1225 +               cell->vl_addrs[cell->vl_naddrs++].s_addr =
1226 +                       htonl((a<<24)|(b<<16)|(c<<8)|d);
1227 +
1228 +               if (cell->vl_naddrs>=16)
1229 +                       break;
1230 +
1231 +       } while(vllist=next, vllist);
1232 +
1233 +       /* add a proc dir for this cell */
1234 +       ret = afs_proc_cell_setup(cell);
1235 +       if (ret<0)
1236 +               goto error;
1237 +
1238 +       /* add to the cell lists */
1239 +       write_lock(&afs_cells_lock);
1240 +       list_add_tail(&cell->link,&afs_cells);
1241 +       write_unlock(&afs_cells_lock);
1242 +
1243 +       down_write(&afs_proc_cells_sem);
1244 +       list_add_tail(&cell->proc_link,&afs_proc_cells);
1245 +       up_write(&afs_proc_cells_sem);
1246 +
1247 +       *_cell = cell;
1248 +       up_write(&afs_cells_sem);
1249 +
1250 +       _leave(" = 0 (%p)",cell);
1251 +       return 0;
1252 +
1253 + badaddr:
1254 +       printk("kAFS: bad VL server IP address: '%s'\n",vllist);
1255 + error:
1256 +       up_write(&afs_cells_sem);
1257 +       kfree(afs_cell_root);
1258 +       return ret;
1259 +} /* end afs_cell_create() */
1260 +
1261 +/*****************************************************************************/
1262 +/*
1263 + * initialise the cell database from module parameters
1264 + */
1265 +int afs_cell_init(void)
1266 +{
1267 +       char *cp;
1268 +       int ret;
1269 +
1270 +       _enter("");
1271 +
1272 +       if (!rootcell) {
1273 +               printk("kAFS: no root cell specified\n");
1274 +               return -EINVAL;
1275 +       }
1276 +
1277 +       cp = strchr(rootcell,':');
1278 +       if (!cp) {
1279 +               printk("kAFS: no VL server IP addresses specified\n");
1280 +               return -EINVAL;
1281 +       }
1282 +
1283 +       /* allocate a cell record for the root cell */
1284 +       *cp++ = 0;
1285 +       ret = afs_cell_create(rootcell,cp,&afs_cell_root);
1286 +       if (ret==0)
1287 +               afs_get_cell(afs_cell_root);
1288 +
1289 +       _leave(" = %d",ret);
1290 +       return ret;
1291 +
1292 +} /* end afs_cell_init() */
1293 +
1294 +/*****************************************************************************/
1295 +/*
1296 + * lookup a cell record
1297 + */
1298 +int afs_cell_lookup(afs_cache_t *cache, const char *name, afs_cell_t **_cell)
1299 +{
1300 +       struct list_head *_p;
1301 +       afs_cell_t *cell;
1302 +
1303 +       _enter("\"%s\",",name?name:"*thiscell*");
1304 +
1305 +       cell = afs_cell_root;
1306 +
1307 +       if (name) {
1308 +               /* if the cell was named, look for it in the cell record list */
1309 +               cell = NULL;
1310 +               read_lock(&afs_cells_lock);
1311 +
1312 +               list_for_each(_p,&afs_cells) {
1313 +                       cell = list_entry(_p,afs_cell_t,link);
1314 +                       if (strcmp(cell->name,name)==0)
1315 +                               break;
1316 +                       cell = NULL;
1317 +               }
1318 +
1319 +               read_unlock(&afs_cells_lock);
1320 +       }
1321 +
1322 +       if (cell)
1323 +               afs_get_cell(cell);
1324 +
1325 +       *_cell = cell;
1326 +       _leave(" = %d (%p)",cell?0:-ENOENT,cell);
1327 +       return cell ? 0 : -ENOENT;
1328 +
1329 +} /* end afs_cell_lookup() */
1330 +
1331 +/*****************************************************************************/
1332 +/*
1333 + * try and get a cell record
1334 + */
1335 +afs_cell_t *afs_get_cell_maybe(afs_cell_t **_cell)
1336 +{
1337 +       afs_cell_t *cell;
1338 +
1339 +       write_lock(&afs_cells_lock);
1340 +
1341 +       cell = *_cell;
1342 +       if (cell && !list_empty(&cell->link))
1343 +               atomic_inc(&cell->usage);
1344 +       else 
1345 +               cell = NULL;
1346 +
1347 +       write_unlock(&afs_cells_lock);
1348 +
1349 +       return cell;
1350 +} /* end afs_get_cell_maybe() */
1351 +
1352 +/*****************************************************************************/
1353 +/*
1354 + * destroy a cell record
1355 + */
1356 +void afs_put_cell(afs_cell_t *cell)
1357 +{
1358 +       _enter("%p{%d,%s}",cell,atomic_read(&cell->usage),cell->name);
1359 +
1360 +       /* sanity check */
1361 +       if (atomic_read(&cell->usage)<=0)
1362 +               BUG();
1363 +
1364 +       /* to prevent a race, the decrement and the dequeue must be effectively atomic */
1365 +       write_lock(&afs_cells_lock);
1366 +
1367 +       if (likely(!atomic_dec_and_test(&cell->usage))) {
1368 +               write_unlock(&afs_cells_lock);
1369 +               _leave("");
1370 +               return;
1371 +       }
1372 +
1373 +       write_unlock(&afs_cells_lock);
1374 +
1375 +       if (!list_empty(&cell->sv_list))        BUG();
1376 +       if (!list_empty(&cell->sv_graveyard))   BUG();
1377 +       if (!list_empty(&cell->vl_list))        BUG();
1378 +       if (!list_empty(&cell->vl_graveyard))   BUG();
1379 +
1380 +       _leave(" [unused]");
1381 +} /* end afs_put_cell() */
1382 +
1383 +/*****************************************************************************/
1384 +/*
1385 + * destroy a cell record
1386 + */
1387 +static void afs_cell_destroy(afs_cell_t *cell)
1388 +{
1389 +       _enter("%p{%d,%s}",cell,atomic_read(&cell->usage),cell->name);
1390 +
1391 +       /* to prevent a race, the decrement and the dequeue must be effectively atomic */
1392 +       write_lock(&afs_cells_lock);
1393 +
1394 +       /* sanity check */
1395 +       if (atomic_read(&cell->usage)!=0)
1396 +               BUG();
1397 +
1398 +       list_del_init(&cell->link);
1399 +
1400 +       write_unlock(&afs_cells_lock);
1401 +
1402 +       down_write(&afs_cells_sem);
1403 +
1404 +       afs_proc_cell_remove(cell);
1405 +
1406 +       down_write(&afs_proc_cells_sem);
1407 +       list_del_init(&afs_cell_root->proc_link);
1408 +       up_write(&afs_proc_cells_sem);
1409 +
1410 +       up_write(&afs_cells_sem);
1411 +
1412 +       if (!list_empty(&cell->sv_list))        BUG();
1413 +       if (!list_empty(&cell->sv_graveyard))   BUG();
1414 +       if (!list_empty(&cell->vl_list))        BUG();
1415 +       if (!list_empty(&cell->vl_graveyard))   BUG();
1416 +
1417 +       /* finish cleaning up the cell */
1418 +       kfree(cell);
1419 +
1420 +       _leave(" [destroyed]");
1421 +} /* end afs_cell_destroy() */
1422 +
1423 +/*****************************************************************************/
1424 +/*
1425 + * lookup the server record corresponding to an Rx RPC peer
1426 + */
1427 +int afs_server_find_by_peer(const struct rxrpc_peer *peer, afs_server_t **_server)
1428 +{
1429 +       struct list_head *_pc, *_ps;
1430 +       afs_server_t *server;
1431 +       afs_cell_t *cell;
1432 +
1433 +       _enter("%p{a=%08x},",peer,ntohl(peer->addr.s_addr));
1434 +
1435 +       /* search the cell list */
1436 +       read_lock(&afs_cells_lock);
1437 +
1438 +       list_for_each(_pc,&afs_cells) {
1439 +               cell = list_entry(_pc,afs_cell_t,link);
1440 +
1441 +               _debug("? cell %s",cell->name);
1442 +
1443 +               write_lock(&cell->sv_lock);
1444 +
1445 +               /* check the active list */
1446 +               list_for_each(_ps,&cell->sv_list) {
1447 +                       server = list_entry(_ps,afs_server_t,link);
1448 +
1449 +                       _debug("?? server %08x",ntohl(server->addr.s_addr));
1450 +
1451 +                       if (memcmp(&server->addr,&peer->addr,sizeof(struct in_addr))==0)
1452 +                               goto found_server;
1453 +               }
1454 +
1455 +               /* check the inactive list */
1456 +               spin_lock(&cell->sv_gylock);
1457 +               list_for_each(_ps,&cell->sv_graveyard) {
1458 +                       server = list_entry(_ps,afs_server_t,link);
1459 +
1460 +                       _debug("?? dead server %08x",ntohl(server->addr.s_addr));
1461 +
1462 +                       if (memcmp(&server->addr,&peer->addr,sizeof(struct in_addr))==0)
1463 +                               goto found_dead_server;
1464 +               }
1465 +               spin_unlock(&cell->sv_gylock);
1466 +
1467 +               write_unlock(&cell->sv_lock);
1468 +       }
1469 +       read_unlock(&afs_cells_lock);
1470 +
1471 +       _leave(" = -ENOENT");
1472 +       return -ENOENT;
1473 +
1474 +       /* we found it in the graveyard - resurrect it */
1475 + found_dead_server:
1476 +       list_del(&server->link);
1477 +       list_add_tail(&server->link,&cell->sv_list);
1478 +       afs_get_server(server);
1479 +       afs_kafstimod_del_timer(&server->timeout);
1480 +       spin_unlock(&cell->sv_gylock);
1481 +       goto success;
1482 +
1483 +       /* we found it - increment its ref count and return it */
1484 + found_server:
1485 +       afs_get_server(server);
1486 +
1487 + success:
1488 +       write_unlock(&cell->sv_lock);
1489 +       read_unlock(&afs_cells_lock);
1490 +
1491 +       *_server = server;
1492 +       _leave(" = 0 (s=%p c=%p)",server,cell);
1493 +       return 0;
1494 +
1495 +} /* end afs_server_find_by_peer() */
1496 +
1497 +/*****************************************************************************/
1498 +/*
1499 + * purge in-memory cell database on module unload
1500 + * - the timeout daemon is stopped before calling this
1501 + */
1502 +void afs_cell_purge(void)
1503 +{
1504 +       afs_vlocation_t *vlocation;
1505 +       afs_cell_t *cell;
1506 +
1507 +       _enter("");
1508 +
1509 +       if (afs_cell_root)
1510 +               afs_put_cell(afs_cell_root);
1511 +
1512 +       while (!list_empty(&afs_cells)) {
1513 +               cell = NULL;
1514 +
1515 +               /* remove the next cell from the front of the list */
1516 +               write_lock(&afs_cells_lock);
1517 +
1518 +               if (!list_empty(&afs_cells)) {
1519 +                       cell = list_entry(afs_cells.next,afs_cell_t,link);
1520 +                       list_del_init(&cell->link);
1521 +               }
1522 +
1523 +               write_unlock(&afs_cells_lock);
1524 +
1525 +               if (cell) {
1526 +                       _debug("PURGING CELL %s (%d)",cell->name,atomic_read(&cell->usage));
1527 +
1528 +                       if (!list_empty(&cell->sv_list)) BUG();
1529 +                       if (!list_empty(&cell->vl_list)) BUG();
1530 +
1531 +                       /* purge the cell's VL graveyard list */
1532 +                       _debug(" - clearing VL graveyard");
1533 +
1534 +                       spin_lock(&cell->vl_gylock);
1535 +
1536 +                       while (!list_empty(&cell->vl_graveyard)) {
1537 +                               vlocation = list_entry(cell->vl_graveyard.next,
1538 +                                                      afs_vlocation_t,link);
1539 +                               list_del_init(&vlocation->link);
1540 +
1541 +                               afs_kafstimod_del_timer(&vlocation->timeout);
1542 +
1543 +                               spin_unlock(&cell->vl_gylock);
1544 +
1545 +                               afs_vlocation_do_timeout(vlocation);
1546 +#warning race if move to use krxtimod instead of kafstimod
1547 +
1548 +                               spin_lock(&cell->vl_gylock);
1549 +                       }
1550 +
1551 +                       spin_unlock(&cell->vl_gylock);
1552 +
1553 +                       /* purge the cell's server graveyard list */
1554 +                       _debug(" - clearing server graveyard");
1555 +
1556 +                       spin_lock(&cell->sv_gylock);
1557 +
1558 +                       while (!list_empty(&cell->sv_graveyard)) {
1559 +                               afs_server_t *server;
1560 +
1561 +                               server = list_entry(cell->sv_graveyard.next,afs_server_t,link);
1562 +                               list_del_init(&server->link);
1563 +
1564 +                               afs_kafstimod_del_timer(&server->timeout);
1565 +
1566 +                               spin_unlock(&cell->sv_gylock);
1567 +
1568 +                               afs_server_do_timeout(server);
1569 +
1570 +                               spin_lock(&cell->sv_gylock);
1571 +                       }
1572 +
1573 +                       spin_unlock(&cell->sv_gylock);
1574 +
1575 +                       /* now the cell should be left with no references */
1576 +                       afs_cell_destroy(cell);
1577 +               }
1578 +       }
1579 +
1580 +       _leave("");
1581 +} /* end afs_cell_purge() */
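/*
 * [Editor's sketch, not part of the patch] Both graveyard loops in
 * afs_cell_purge() above follow the same shape: detach one entry while
 * holding the spinlock, drop the lock so the teardown routine can block,
 * then retake the lock before re-testing the list.  The generic pattern,
 * with a hypothetical teardown_entry():
 */
static void drain_graveyard(spinlock_t *lock, struct list_head *graveyard)
{
	spin_lock(lock);
	while (!list_empty(graveyard)) {
		struct list_head *entry = graveyard->next;

		list_del_init(entry);
		spin_unlock(lock);

		/* teardown_entry(entry);  - may sleep, so the lock is dropped */

		spin_lock(lock);
	}
	spin_unlock(lock);
}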
1582 diff -urNp linux-5240/fs/afs/cell.h linux-5250/fs/afs/cell.h
1583 --- linux-5240/fs/afs/cell.h    1970-01-01 01:00:00.000000000 +0100
1584 +++ linux-5250/fs/afs/cell.h    
1585 @@ -0,0 +1,64 @@
1586 +/* cell.h: AFS cell record
1587 + *
1588 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
1589 + * Written by David Howells (dhowells@redhat.com)
1590 + *
1591 + * This program is free software; you can redistribute it and/or
1592 + * modify it under the terms of the GNU General Public License
1593 + * as published by the Free Software Foundation; either version
1594 + * 2 of the License, or (at your option) any later version.
1595 + */
1596 +
1597 +#ifndef _LINUX_AFS_CELL_H
1598 +#define _LINUX_AFS_CELL_H
1599 +
1600 +#include "types.h"
1601 +#include "cache-layout.h"
1602 +
1603 +extern volatile int afs_cells_being_purged; /* T when cells are being purged by rmmod */
1604 +
1605 +/*****************************************************************************/
1606 +/*
1607 + * AFS cell record
1608 + */
1609 +struct afs_cell
1610 +{
1611 +       atomic_t                usage;
1612 +       struct list_head        link;           /* main cell list link */
1613 +       struct list_head        proc_link;      /* /proc cell list link */
1614 +       struct proc_dir_entry   *proc_dir;      /* /proc dir for this cell */
1615 +       afs_cache_cellix_t      cache_ix;       /* cell cache index */
1616 +
1617 +       /* server record management */
1618 +       rwlock_t                sv_lock;        /* active server list lock */
1619 +       struct list_head        sv_list;        /* active server list */
1620 +       struct list_head        sv_graveyard;   /* inactive server list */
1621 +       spinlock_t              sv_gylock;      /* inactive server list lock */
1622 +
1623 +       /* volume location record management */
1624 +       struct rw_semaphore     vl_sem;         /* volume management serialisation semaphore */
1625 +       struct list_head        vl_list;        /* cell's active VL record list */
1626 +       struct list_head        vl_graveyard;   /* cell's inactive VL record list */
1627 +       spinlock_t              vl_gylock;      /* graveyard lock */
1628 +       unsigned short          vl_naddrs;      /* number of VL servers in addr list */
1629 +       unsigned short          vl_curr_svix;   /* current server index */
1630 +       struct in_addr          vl_addrs[16];   /* cell VL server addresses */
1631 +
1632 +       char                    name[0];        /* cell name - must go last */
1633 +};
1634 +
1635 +extern int afs_cell_init(void);
1636 +
1637 +extern int afs_cell_create(const char *name, char *vllist, afs_cell_t **_cell);
1638 +
1639 +extern int afs_cell_lookup(afs_cache_t *cache, const char *name, afs_cell_t **_cell);
1640 +
1641 +#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
1642 +
1643 +extern afs_cell_t *afs_get_cell_maybe(afs_cell_t **_cell);
1644 +
1645 +extern void afs_put_cell(afs_cell_t *cell);
1646 +
1647 +extern void afs_cell_purge(void);
1648 +
1649 +#endif /* _LINUX_AFS_CELL_H */
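/*
 * [Editor's sketch, not part of the patch] Hypothetical caller shape for
 * the declarations above, assuming (as afs_server_find_by_peer() does for
 * servers) that a successful afs_cell_lookup() returns the cell with its
 * usage count already raised:
 */
static int example_ping_cell(afs_cache_t *cache)
{
	afs_cell_t *cell;
	int ret;

	ret = afs_cell_lookup(cache, "example.org", &cell);
	if (ret < 0)
		return ret;

	/* ... consult cell->vl_addrs[0 .. vl_naddrs-1] for VL servers ... */

	afs_put_cell(cell);	/* balance the reference from the lookup */
	return 0;
}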
1650 diff -urNp linux-5240/fs/afs/cmservice.c linux-5250/fs/afs/cmservice.c
1651 --- linux-5240/fs/afs/cmservice.c       1970-01-01 01:00:00.000000000 +0100
1652 +++ linux-5250/fs/afs/cmservice.c       
1653 @@ -0,0 +1,639 @@
1654 +/* cmservice.c: AFS Cache Manager Service
1655 + *
1656 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
1657 + * Written by David Howells (dhowells@redhat.com)
1658 + *
1659 + * This program is free software; you can redistribute it and/or
1660 + * modify it under the terms of the GNU General Public License
1661 + * as published by the Free Software Foundation; either version
1662 + * 2 of the License, or (at your option) any later version.
1663 + */
1664 +
1665 +#include <linux/version.h>
1666 +#include <linux/module.h>
1667 +#include <linux/init.h>
1668 +#include <linux/sched.h>
1669 +#include <linux/completion.h>
1670 +#include "server.h"
1671 +#include "cell.h"
1672 +#include "transport.h"
1673 +#include <rxrpc/rxrpc.h>
1674 +#include <rxrpc/transport.h>
1675 +#include <rxrpc/connection.h>
1676 +#include <rxrpc/call.h>
1677 +#include "cmservice.h"
1678 +#include "internal.h"
1679 +
1680 +static unsigned afscm_usage;           /* AFS cache manager usage count */
1681 +static struct rw_semaphore afscm_sem;  /* AFS cache manager start/stop semaphore */
1682 +
1683 +static int afscm_new_call(struct rxrpc_call *call);
1684 +static void afscm_attention(struct rxrpc_call *call);
1685 +static void afscm_error(struct rxrpc_call *call);
1686 +static void afscm_aemap(struct rxrpc_call *call);
1687 +
1688 +static void _SRXAFSCM_CallBack(struct rxrpc_call *call);
1689 +static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call);
1690 +static void _SRXAFSCM_Probe(struct rxrpc_call *call);
1691 +
1692 +typedef void (*_SRXAFSCM_xxxx_t)(struct rxrpc_call *call);
1693 +
1694 +static const struct rxrpc_operation AFSCM_ops[] = {
1695 +       {
1696 +               id:     204,
1697 +               asize:  RXRPC_APP_MARK_EOF,
1698 +               name:   "CallBack",
1699 +               user:   _SRXAFSCM_CallBack,
1700 +       },
1701 +       {
1702 +               id:     205,
1703 +               asize:  RXRPC_APP_MARK_EOF,
1704 +               name:   "InitCallBackState",
1705 +               user:   _SRXAFSCM_InitCallBackState,
1706 +       },
1707 +       {
1708 +               id:     206,
1709 +               asize:  RXRPC_APP_MARK_EOF,
1710 +               name:   "Probe",
1711 +               user:   _SRXAFSCM_Probe,
1712 +       },
1713 +#if 0
1714 +       {
1715 +               id:     207,
1716 +               asize:  RXRPC_APP_MARK_EOF,
1717 +               name:   "GetLock",
1718 +               user:   _SRXAFSCM_GetLock,
1719 +       },
1720 +       {
1721 +               id:     208,
1722 +               asize:  RXRPC_APP_MARK_EOF,
1723 +               name:   "GetCE",
1724 +               user:   _SRXAFSCM_GetCE,
1725 +       },
1726 +       {
1727 +               id:     209,
1728 +               asize:  RXRPC_APP_MARK_EOF,
1729 +               name:   "GetXStatsVersion",
1730 +               user:   _SRXAFSCM_GetXStatsVersion,
1731 +       },
1732 +       {
1733 +               id:     210,
1734 +               asize:  RXRPC_APP_MARK_EOF,
1735 +               name:   "GetXStats",
1736 +               user:   _SRXAFSCM_GetXStats,
1737 +       }
1738 +#endif
1739 +};
1740 +
1741 +static struct rxrpc_service AFSCM_service = {
1742 +       name:           "AFS/CM",
1743 +       owner:          THIS_MODULE,
1744 +       link:           LIST_HEAD_INIT(AFSCM_service.link),
1745 +       new_call:       afscm_new_call,
1746 +       service_id:     1,
1747 +       attn_func:      afscm_attention,
1748 +       error_func:     afscm_error,
1749 +       aemap_func:     afscm_aemap,
1750 +       ops_begin:      &AFSCM_ops[0],
1751 +       ops_end:        &AFSCM_ops[sizeof(AFSCM_ops)/sizeof(AFSCM_ops[0])],
1752 +};
1753 +
1754 +static DECLARE_COMPLETION(kafscmd_alive);
1755 +static DECLARE_COMPLETION(kafscmd_dead);
1756 +static DECLARE_WAIT_QUEUE_HEAD(kafscmd_sleepq);
1757 +static LIST_HEAD(kafscmd_attention_list);
1758 +static LIST_HEAD(afscm_calls);
1759 +static spinlock_t afscm_calls_lock = SPIN_LOCK_UNLOCKED;
1760 +static spinlock_t kafscmd_attention_lock = SPIN_LOCK_UNLOCKED;
1761 +static int kafscmd_die;
1762 +
1763 +/*****************************************************************************/
1764 +/*
1765 + * AFS Cache Manager kernel thread
1766 + */
1767 +static int kafscmd(void *arg)
1768 +{
1769 +       DECLARE_WAITQUEUE(myself,current);
1770 +
1771 +       struct rxrpc_call *call;
1772 +       _SRXAFSCM_xxxx_t func;
1773 +       int die;
1774 +
1775 +       printk("kAFS: Started kafscmd %d\n",current->pid);
1776 +       strcpy(current->comm,"kafscmd");
1777 +
1778 +       daemonize();
1779 +
1780 +       complete(&kafscmd_alive);
1781 +
1782 +       /* only certain signals are of interest */
1783 +       spin_lock_irq(&current->sigmask_lock);
1784 +       siginitsetinv(&current->blocked,0);
1785 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
1786 +       recalc_sigpending();
1787 +#else
1788 +       recalc_sigpending(current);
1789 +#endif
1790 +       spin_unlock_irq(&current->sigmask_lock);
1791 +
1792 +       /* loop around looking for things to attend to */
1793 +       do {
1794 +               if (list_empty(&kafscmd_attention_list)) {
1795 +                       set_current_state(TASK_INTERRUPTIBLE);
1796 +                       add_wait_queue(&kafscmd_sleepq,&myself);
1797 +
1798 +                       for (;;) {
1799 +                               set_current_state(TASK_INTERRUPTIBLE);
1800 +                               if (!list_empty(&kafscmd_attention_list) ||
1801 +                                   signal_pending(current) ||
1802 +                                   kafscmd_die)
1803 +                                       break;
1804 +
1805 +                               schedule();
1806 +                       }
1807 +
1808 +                       remove_wait_queue(&kafscmd_sleepq,&myself);
1809 +                       set_current_state(TASK_RUNNING);
1810 +               }
1811 +
1812 +               die = kafscmd_die;
1813 +
1814 +               /* dequeue the next call requiring attention */
1815 +               call = NULL;
1816 +               spin_lock(&kafscmd_attention_lock);
1817 +
1818 +               if (!list_empty(&kafscmd_attention_list)) {
1819 +                       call = list_entry(kafscmd_attention_list.next,
1820 +                                         struct rxrpc_call,
1821 +                                         app_attn_link);
1822 +                       list_del_init(&call->app_attn_link);
1823 +                       die = 0;
1824 +               }
1825 +
1826 +               spin_unlock(&kafscmd_attention_lock);
1827 +
1828 +               if (call) {
1829 +                       /* act upon it */
1830 +                       _debug("@@@ Begin Attend Call %p",call);
1831 +
1832 +                       func = call->app_user;
1833 +                       if (func)
1834 +                               func(call);
1835 +
1836 +                       rxrpc_put_call(call);
1837 +
1838 +                       _debug("@@@ End Attend Call %p",call);
1839 +               }
1840 +
1841 +       } while(!die);
1842 +
1843 +       /* and that's all */
1844 +       complete_and_exit(&kafscmd_dead,0);
1845 +
1846 +} /* end kafscmd() */
1847 +
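/*
 * [Editor's sketch, not part of the patch] kafscmd() above sleeps using
 * the classic 2.4-era manual wait-queue idiom.  Reduced to its essentials,
 * with work_available() standing in for the real wake-up conditions:
 */
static void example_wait_for_work(wait_queue_head_t *sleepq)
{
	DECLARE_WAITQUEUE(myself, current);

	add_wait_queue(sleepq, &myself);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (work_available() /* hypothetical condition */ ||
		    signal_pending(current))
			break;		/* state reset below */
		schedule();		/* sleep until wake_up(sleepq) */
	}
	remove_wait_queue(sleepq, &myself);
	set_current_state(TASK_RUNNING);
}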
1848 +/*****************************************************************************/
1849 +/*
1850 + * handle a call coming in to the cache manager
1851 + * - if I want to keep the call, I must increment its usage count
1852 + * - the return value will be negated and passed back in an abort packet if non-zero
1853 + * - serialised by virtue of there only being one krxiod
1854 + */
1855 +static int afscm_new_call(struct rxrpc_call *call)
1856 +{
1857 +       _enter("%p{cid=%u u=%d}",call,ntohl(call->call_id),atomic_read(&call->usage));
1858 +
1859 +       rxrpc_get_call(call);
1860 +
1861 +       /* add to my current call list */
1862 +       spin_lock(&afscm_calls_lock);
1863 +       list_add(&call->app_link,&afscm_calls);
1864 +       spin_unlock(&afscm_calls_lock);
1865 +
1866 +       _leave(" = 0");
1867 +       return 0;
1868 +
1869 +} /* end afscm_new_call() */
1870 +
1871 +/*****************************************************************************/
1872 +/*
1873 + * queue on the kafscmd queue for attention
1874 + */
1875 +static void afscm_attention(struct rxrpc_call *call)
1876 +{
1877 +       _enter("%p{cid=%u u=%d}",call,ntohl(call->call_id),atomic_read(&call->usage));
1878 +
1879 +       spin_lock(&kafscmd_attention_lock);
1880 +
1881 +       if (list_empty(&call->app_attn_link)) {
1882 +               list_add_tail(&call->app_attn_link,&kafscmd_attention_list);
1883 +               rxrpc_get_call(call);
1884 +       }
1885 +
1886 +       spin_unlock(&kafscmd_attention_lock);
1887 +
1888 +       wake_up(&kafscmd_sleepq);
1889 +
1890 +       _leave(" {u=%d}",atomic_read(&call->usage));
1891 +} /* end afscm_attention() */
1892 +
1893 +/*****************************************************************************/
1894 +/*
1895 + * handle my call being aborted
1896 + * - clean up, dequeue and put my ref to the call
1897 + */
1898 +static void afscm_error(struct rxrpc_call *call)
1899 +{
1900 +       int removed;
1901 +
1902 +       _enter("%p{est=%s ac=%u er=%d}",
1903 +              call,
1904 +              rxrpc_call_error_states[call->app_err_state],
1905 +              call->app_abort_code,
1906 +              call->app_errno);
1907 +
1908 +       spin_lock(&kafscmd_attention_lock);
1909 +
1910 +       if (list_empty(&call->app_attn_link)) {
1911 +               list_add_tail(&call->app_attn_link,&kafscmd_attention_list);
1912 +               rxrpc_get_call(call);
1913 +       }
1914 +
1915 +       spin_unlock(&kafscmd_attention_lock);
1916 +
1917 +       removed = 0;
1918 +       spin_lock(&afscm_calls_lock);
1919 +       if (!list_empty(&call->app_link)) {
1920 +               list_del_init(&call->app_link);
1921 +               removed = 1;
1922 +       }
1923 +       spin_unlock(&afscm_calls_lock);
1924 +
1925 +       if (removed)
1926 +               rxrpc_put_call(call);
1927 +
1928 +       wake_up(&kafscmd_sleepq);
1929 +
1930 +       _leave("");
1931 +} /* end afscm_error() */
1932 +
1933 +/*****************************************************************************/
1934 +/*
1935 + * map afs abort codes to/from Linux error codes
1936 + * - called with call->lock held
1937 + */
1938 +static void afscm_aemap(struct rxrpc_call *call)
1939 +{
1940 +       switch (call->app_err_state) {
1941 +       case RXRPC_ESTATE_LOCAL_ABORT:
1942 +               call->app_abort_code = -call->app_errno;
1943 +               break;
1944 +       case RXRPC_ESTATE_PEER_ABORT:
1945 +               call->app_errno = -ECONNABORTED;
1946 +               break;
1947 +       default:
1948 +               break;
1949 +       }
1950 +} /* end afscm_aemap() */
1951 +
1952 +/*****************************************************************************/
1953 +/*
1954 + * start the cache manager service if not already started
1955 + */
1956 +int afscm_start(void)
1957 +{
1958 +       int ret;
1959 +
1960 +       down_write(&afscm_sem);
1961 +       if (!afscm_usage) {
1962 +               ret = kernel_thread(kafscmd,NULL,0);
1963 +               if (ret<0)
1964 +                       goto out;
1965 +
1966 +               wait_for_completion(&kafscmd_alive);
1967 +
1968 +               ret = rxrpc_add_service(afs_transport,&AFSCM_service);
1969 +               if (ret<0)
1970 +                       goto kill;
1971 +       }
1972 +
1973 +       afscm_usage++;
1974 +       up_write(&afscm_sem);
1975 +
1976 +       return 0;
1977 +
1978 + kill:
1979 +       kafscmd_die = 1;
1980 +       wake_up(&kafscmd_sleepq);
1981 +       wait_for_completion(&kafscmd_dead);
1982 +
1983 + out:
1984 +       up_write(&afscm_sem);
1985 +       return ret;
1986 +
1987 +} /* end afscm_start() */
1988 +
1989 +/*****************************************************************************/
1990 +/*
1991 + * stop the cache manager service
1992 + */
1993 +void afscm_stop(void)
1994 +{
1995 +       struct rxrpc_call *call;
1996 +
1997 +       down_write(&afscm_sem);
1998 +
1999 +       if (afscm_usage==0) BUG();
2000 +       afscm_usage--;
2001 +
2002 +       if (afscm_usage==0) {
2003 +               /* don't want more incoming calls */
2004 +               rxrpc_del_service(afs_transport,&AFSCM_service);
2005 +
2006 +               /* abort any calls I've still got open (the afscm_error() will dequeue them) */
2007 +               spin_lock(&afscm_calls_lock);
2008 +               while (!list_empty(&afscm_calls)) {
2009 +                       call = list_entry(afscm_calls.next,struct rxrpc_call,app_link);
2010 +                       list_del_init(&call->app_link);
2011 +                       rxrpc_get_call(call);
2012 +                       spin_unlock(&afscm_calls_lock);
2013 +
2014 +                       rxrpc_call_abort(call,-ESRCH); /* abort, dequeue and put */
2015 +
2016 +                       rxrpc_put_call(call);
2017 +
2018 +                       spin_lock(&afscm_calls_lock);
2019 +               }
2020 +               spin_unlock(&afscm_calls_lock);
2021 +
2022 +               /* get rid of my daemon */
2023 +               kafscmd_die = 1;
2024 +               wake_up(&kafscmd_sleepq);
2025 +               wait_for_completion(&kafscmd_dead);
2026 +
2027 +               /* dispose of any calls waiting for attention */
2028 +               spin_lock(&kafscmd_attention_lock);
2029 +               while (!list_empty(&kafscmd_attention_list)) {
2030 +                       call = list_entry(kafscmd_attention_list.next,
2031 +                                         struct rxrpc_call,
2032 +                                         app_attn_link);
2033 +
2034 +                       list_del_init(&call->app_attn_link);
2035 +                       spin_unlock(&kafscmd_attention_lock);
2036 +
2037 +                       rxrpc_put_call(call);
2038 +
2039 +                       spin_lock(&kafscmd_attention_lock);
2040 +               }
2041 +               spin_unlock(&kafscmd_attention_lock);
2042 +       }
2043 +
2044 +       up_write(&afscm_sem);
2045 +
2046 +} /* end afscm_stop() */
2047 +
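/*
 * [Editor's note, not part of the patch] afscm_start() and afscm_stop()
 * are usage counted: each successful start must be matched by exactly one
 * stop, and the kafscmd thread, the registered AFS/CM service and any
 * outstanding calls are only torn down when the count returns to zero.
 * Hypothetical pairing at a call site:
 */
static int example_bring_up_cm(void)
{
	int ret = afscm_start();

	if (ret < 0)
		return ret;	/* thread start or service registration failed */

	/* ... the cache manager can now receive CallBack/Probe calls ... */

	afscm_stop();		/* drop our usage count again */
	return 0;
}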
2048 +/*****************************************************************************/
2049 +/*
2050 + * handle the fileserver breaking a set of callbacks
2051 + */
2052 +static void _SRXAFSCM_CallBack(struct rxrpc_call *call)
2053 +{
2054 +       afs_server_t *server;
2055 +       size_t count, qty, tmp;
2056 +       int ret = 0, removed;
2057 +
2058 +       _enter("%p{acs=%s}",call,rxrpc_call_states[call->app_call_state]);
2059 +
2060 +       server = afs_server_get_from_peer(call->conn->peer);
2061 +
2062 +       switch (call->app_call_state) {
2063 +               /* we've received the last packet
2064 +                * - drain all the data from the call and send the reply
2065 +                */
2066 +       case RXRPC_CSTATE_SRVR_GOT_ARGS:
2067 +               ret = -EBADMSG;
2068 +               qty = call->app_ready_qty;
2069 +               if (qty<8 || qty>50*(6*4)+8)
2070 +                       break;
2071 +
2072 +               {
2073 +                       afs_callback_t *cb, *pcb;
2074 +                       int loop;
2075 +                       u32 *fp, *bp;
2076 +
2077 +                       fp = rxrpc_call_alloc_scratch(call,qty);
2078 +
2079 +                       /* drag the entire argument block out to the scratch space */
2080 +                       ret = rxrpc_call_read_data(call,fp,qty,0);
2081 +                       if (ret<0)
2082 +                               break;
2083 +
2084 +                       /* and unmarshall the parameter block */
2085 +                       ret = -EBADMSG;
2086 +                       count = ntohl(*fp++);
2087 +                       if (count>AFSCBMAX ||
2088 +                           (count*(3*4)+8 != qty && count*(6*4)+8 != qty))
2089 +                               break;
2090 +
2091 +                       bp = fp + count*3;
2092 +                       tmp = ntohl(*bp++);
2093 +                       if (tmp>0 && tmp!=count)
2094 +                               break;
2095 +                       if (tmp==0)
2096 +                               bp = NULL;
2097 +
2098 +                       pcb = cb = rxrpc_call_alloc_scratch_s(call,afs_callback_t);
2099 +
2100 +                       for (loop=count-1; loop>=0; loop--) {
2101 +                               pcb->fid.vid    = ntohl(*fp++);
2102 +                               pcb->fid.vnode  = ntohl(*fp++);
2103 +                               pcb->fid.unique = ntohl(*fp++);
2104 +                               if (bp) {
2105 +                                       pcb->version    = ntohl(*bp++);
2106 +                                       pcb->expiry     = ntohl(*bp++);
2107 +                                       pcb->type       = ntohl(*bp++);
2108 +                               }
2109 +                               else {
2110 +                                       pcb->version    = 0;
2111 +                                       pcb->expiry     = 0;
2112 +                                       pcb->type       = AFSCM_CB_UNTYPED;
2113 +                               }
2114 +                               pcb++;
2115 +                       }
2116 +
2117 +                       /* invoke the actual service routine */
2118 +                       ret = SRXAFSCM_CallBack(server,count,cb);
2119 +                       if (ret<0)
2120 +                               break;
2121 +               }
2122 +
2123 +               /* send the reply */
2124 +               ret = rxrpc_call_write_data(call,0,NULL,RXRPC_LAST_PACKET,GFP_KERNEL,0,&count);
2125 +               if (ret<0)
2126 +                       break;
2127 +               break;
2128 +
2129 +               /* operation complete */
2130 +       case RXRPC_CSTATE_COMPLETE:
2131 +               call->app_user = NULL;
2132 +               removed = 0;
2133 +               spin_lock(&afscm_calls_lock);
2134 +               if (!list_empty(&call->app_link)) {
2135 +                       list_del_init(&call->app_link);
2136 +                       removed = 1;
2137 +               }
2138 +               spin_unlock(&afscm_calls_lock);
2139 +
2140 +               if (removed)
2141 +                       rxrpc_put_call(call);
2142 +               break;
2143 +
2144 +               /* operation terminated on error */
2145 +       case RXRPC_CSTATE_ERROR:
2146 +               call->app_user = NULL;
2147 +               break;
2148 +
2149 +       default:
2150 +               break;
2151 +       }
2152 +
2153 +       if (ret<0)
2154 +               rxrpc_call_abort(call,ret);
2155 +
2156 +       if (server) afs_put_server(server);
2157 +
2158 +       _leave(" = %d",ret);
2159 +
2160 +} /* end _SRXAFSCM_CallBack() */
2161 +
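/*
 * [Editor's note, not part of the patch] The unmarshalling in
 * _SRXAFSCM_CallBack() above corresponds to this argument-block layout
 * (every field a 32-bit big-endian word, hence the ntohl() calls); the
 * names are illustrative only:
 *
 *	u32 fid_count;                          <= AFSCBMAX (the 50 in the size check)
 *	struct { u32 vid, vnode, unique; }      fid[fid_count];
 *	u32 cb_count;                           0 or fid_count
 *	struct { u32 version, expiry, type; }   cb[cb_count];
 *
 * which is why the total length must be either fid_count*(3*4)+8 bytes
 * (callback array empty, every entry defaulted to AFSCM_CB_UNTYPED) or
 * fid_count*(6*4)+8 bytes.
 */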
2162 +/*****************************************************************************/
2163 +/*
2164 + * handle the fileserver asking us to initialise our callback state
2165 + */
2166 +static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call)
2167 +{
2168 +       afs_server_t *server;
2169 +       size_t count;
2170 +       int ret = 0, removed;
2171 +
2172 +       _enter("%p{acs=%s}",call,rxrpc_call_states[call->app_call_state]);
2173 +
2174 +       server = afs_server_get_from_peer(call->conn->peer);
2175 +
2176 +       switch (call->app_call_state) {
2177 +               /* we've received the last packet - drain all the data from the call */
2178 +       case RXRPC_CSTATE_SRVR_GOT_ARGS:
2179 +               /* shouldn't be any args */
2180 +               ret = -EBADMSG;
2181 +               break;
2182 +
2183 +               /* send the reply when asked for it */
2184 +       case RXRPC_CSTATE_SRVR_SND_REPLY:
2185 +               /* invoke the actual service routine */
2186 +               ret = SRXAFSCM_InitCallBackState(server);
2187 +               if (ret<0)
2188 +                       break;
2189 +
2190 +               ret = rxrpc_call_write_data(call,0,NULL,RXRPC_LAST_PACKET,GFP_KERNEL,0,&count);
2191 +               if (ret<0)
2192 +                       break;
2193 +               break;
2194 +
2195 +               /* operation complete */
2196 +       case RXRPC_CSTATE_COMPLETE:
2197 +               call->app_user = NULL;
2198 +               removed = 0;
2199 +               spin_lock(&afscm_calls_lock);
2200 +               if (!list_empty(&call->app_link)) {
2201 +                       list_del_init(&call->app_link);
2202 +                       removed = 1;
2203 +               }
2204 +               spin_unlock(&afscm_calls_lock);
2205 +
2206 +               if (removed)
2207 +                       rxrpc_put_call(call);
2208 +               break;
2209 +
2210 +               /* operation terminated on error */
2211 +       case RXRPC_CSTATE_ERROR:
2212 +               call->app_user = NULL;
2213 +               break;
2214 +
2215 +       default:
2216 +               break;
2217 +       }
2218 +
2219 +       if (ret<0)
2220 +               rxrpc_call_abort(call,ret);
2221 +
2222 +       if (server) afs_put_server(server);
2223 +
2224 +       _leave(" = %d",ret);
2225 +
2226 +} /* end _SRXAFSCM_InitCallBackState() */
2227 +
2228 +/*****************************************************************************/
2229 +/*
2230 + * handle a probe from a fileserver
2231 + */
2232 +static void _SRXAFSCM_Probe(struct rxrpc_call *call)
2233 +{
2234 +       afs_server_t *server;
2235 +       size_t count;
2236 +       int ret = 0, removed;
2237 +
2238 +       _enter("%p{acs=%s}",call,rxrpc_call_states[call->app_call_state]);
2239 +
2240 +       server = afs_server_get_from_peer(call->conn->peer);
2241 +
2242 +       switch (call->app_call_state) {
2243 +               /* we've received the last packet - drain all the data from the call */
2244 +       case RXRPC_CSTATE_SRVR_GOT_ARGS:
2245 +               /* shouldn't be any args */
2246 +               ret = -EBADMSG;
2247 +               break;
2248 +
2249 +               /* send the reply when asked for it */
2250 +       case RXRPC_CSTATE_SRVR_SND_REPLY:
2251 +               /* invoke the actual service routine */
2252 +               ret = SRXAFSCM_Probe(server);
2253 +               if (ret<0)
2254 +                       break;
2255 +
2256 +               ret = rxrpc_call_write_data(call,0,NULL,RXRPC_LAST_PACKET,GFP_KERNEL,0,&count);
2257 +               if (ret<0)
2258 +                       break;
2259 +               break;
2260 +
2261 +               /* operation complete */
2262 +       case RXRPC_CSTATE_COMPLETE:
2263 +               call->app_user = NULL;
2264 +               removed = 0;
2265 +               spin_lock(&afscm_calls_lock);
2266 +               if (!list_empty(&call->app_link)) {
2267 +                       list_del_init(&call->app_link);
2268 +                       removed = 1;
2269 +               }
2270 +               spin_unlock(&afscm_calls_lock);
2271 +
2272 +               if (removed)
2273 +                       rxrpc_put_call(call);
2274 +               break;
2275 +
2276 +               /* operation terminated on error */
2277 +       case RXRPC_CSTATE_ERROR:
2278 +               call->app_user = NULL;
2279 +               break;
2280 +
2281 +       default:
2282 +               break;
2283 +       }
2284 +
2285 +       if (ret<0)
2286 +               rxrpc_call_abort(call,ret);
2287 +
2288 +       if (server) afs_put_server(server);
2289 +
2290 +       _leave(" = %d",ret);
2291 +
2292 +} /* end _SRXAFSCM_Probe() */
2293 diff -urNp linux-5240/fs/afs/cmservice.h linux-5250/fs/afs/cmservice.h
2294 --- linux-5240/fs/afs/cmservice.h       1970-01-01 01:00:00.000000000 +0100
2295 +++ linux-5250/fs/afs/cmservice.h       
2296 @@ -0,0 +1,27 @@
2297 +/* cmservice.h: AFS Cache Manager Service declarations
2298 + *
2299 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
2300 + * Written by David Howells (dhowells@redhat.com)
2301 + *
2302 + * This program is free software; you can redistribute it and/or
2303 + * modify it under the terms of the GNU General Public License
2304 + * as published by the Free Software Foundation; either version
2305 + * 2 of the License, or (at your option) any later version.
2306 + */
2307 +
2308 +#ifndef _LINUX_AFS_CMSERVICE_H
2309 +#define _LINUX_AFS_CMSERVICE_H
2310 +
2311 +#include <rxrpc/transport.h>
2312 +#include "types.h"
2313 +
2314 +/* cache manager start/stop */
2315 +extern int afscm_start(void);
2316 +extern void afscm_stop(void);
2317 +
2318 +/* cache manager server functions */
2319 +extern int SRXAFSCM_InitCallBackState(afs_server_t *server);
2320 +extern int SRXAFSCM_CallBack(afs_server_t *server, size_t count, afs_callback_t callbacks[]);
2321 +extern int SRXAFSCM_Probe(afs_server_t *server);
2322 +
2323 +#endif /* _LINUX_AFS_CMSERVICE_H */
2324 diff -urNp linux-5240/fs/afs/dir.c linux-5250/fs/afs/dir.c
2325 --- linux-5240/fs/afs/dir.c     1970-01-01 01:00:00.000000000 +0100
2326 +++ linux-5250/fs/afs/dir.c     
2327 @@ -0,0 +1,640 @@
2328 +/* dir.c: AFS filesystem directory handling
2329 + *
2330 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
2331 + * Written by David Howells (dhowells@redhat.com)
2332 + *
2333 + * This program is free software; you can redistribute it and/or
2334 + * modify it under the terms of the GNU General Public License
2335 + * as published by the Free Software Foundation; either version
2336 + * 2 of the License, or (at your option) any later version.
2337 + */
2338 +
2339 +#include <linux/kernel.h>
2340 +#include <linux/module.h>
2341 +#include <linux/init.h>
2342 +#include <linux/sched.h>
2343 +#include <linux/slab.h>
2344 +#include <linux/fs.h>
2345 +#include <linux/pagemap.h>
2346 +#include <linux/smp_lock.h>
2347 +#include "vnode.h"
2348 +#include "volume.h"
2349 +#include <rxrpc/call.h>
2350 +#include "super.h"
2351 +#include "internal.h"
2352 +
2353 +static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry);
2354 +static int afs_dir_open(struct inode *inode, struct file *file);
2355 +static int afs_dir_readdir(struct file *file, void *dirent, filldir_t filldir);
2356 +static int afs_d_revalidate(struct dentry *dentry, int flags);
2357 +static int afs_d_delete(struct dentry *dentry);
2358 +static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, loff_t fpos,
2359 +                                    ino_t ino, unsigned dtype);
2360 +
2361 +struct file_operations afs_dir_file_operations = {
2362 +       open:           afs_dir_open,
2363 +       readdir:        afs_dir_readdir,
2364 +};
2365 +
2366 +struct inode_operations afs_dir_inode_operations = {
2367 +       lookup:         afs_dir_lookup,
2368 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
2369 +       getattr:        afs_inode_getattr,
2370 +#else
2371 +       revalidate:     afs_inode_revalidate,
2372 +#endif
2373 +//     create:         afs_dir_create,
2374 +//     link:           afs_dir_link,
2375 +//     unlink:         afs_dir_unlink,
2376 +//     symlink:        afs_dir_symlink,
2377 +//     mkdir:          afs_dir_mkdir,
2378 +//     rmdir:          afs_dir_rmdir,
2379 +//     mknod:          afs_dir_mknod,
2380 +//     rename:         afs_dir_rename,
2381 +};
2382 +
2383 +static struct dentry_operations afs_fs_dentry_operations = {
2384 +       d_revalidate:   afs_d_revalidate,
2385 +       d_delete:       afs_d_delete,
2386 +};
2387 +
2388 +#define AFS_DIR_HASHTBL_SIZE   128
2389 +#define AFS_DIR_DIRENT_SIZE    32
2390 +#define AFS_DIRENT_PER_BLOCK   64
2391 +
2392 +typedef struct afs_dirent {
2393 +               u8      valid;
2394 +               u8      unused[1];
2395 +               u16     hash_next;
2396 +               u32     vnode;
2397 +               u32     unique;
2398 +               u8      name[16];
2399 +               u8      overflow[4];    /* if any char of the name (inc NUL) reaches here, consume
2400 +                                        * the next dirent too */
2401 +       u8      extended_name[32];
2402 +} afs_dirent_t;
2403 +
2404 +/* AFS directory page header (one at the beginning of every 2048-byte chunk) */
2405 +typedef struct afs_dir_pagehdr {
2406 +       u16     npages;
2407 +       u16     magic;
2408 +#define AFS_DIR_MAGIC htons(1234)
2409 +       u8      nentries;
2410 +       u8      bitmap[8];
2411 +       u8      pad[19];
2412 +} afs_dir_pagehdr_t;
2413 +
2414 +/* directory block layout */
2415 +typedef union afs_dir_block {
2416 +
2417 +       afs_dir_pagehdr_t pagehdr;
2418 +
2419 +       struct {
2420 +               afs_dir_pagehdr_t pagehdr;
2421 +               u8              alloc_ctrs[128];
2422 +               u16             hashtable[AFS_DIR_HASHTBL_SIZE]; /* dir hash table */
2423 +       } hdr;
2424 +
2425 +       afs_dirent_t dirents[AFS_DIRENT_PER_BLOCK];
2426 +} afs_dir_block_t;
2427 +
2428 +/* layout on a linux VM page */
2429 +typedef struct afs_dir_page {
2430 +       afs_dir_block_t blocks[PAGE_SIZE/sizeof(afs_dir_block_t)];
2431 +} afs_dir_page_t;
2432 +
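/*
 * [Editor's note, not part of the patch] Layout arithmetic behind the
 * definitions above, which afs_dir_open() and afs_dir_lookup() later
 * assert with BUG():
 *
 *	sizeof(afs_dirent_t)    = AFS_DIR_DIRENT_SIZE  = 32 bytes
 *	sizeof(afs_dir_block_t) = 64 dirents * 32      = 2048 bytes
 *	blocks per VM page      = PAGE_SIZE / 2048     (2 with 4KB pages)
 *
 * A name that overruns name[16] (including its NUL, per the overflow[]
 * comment) spills into the following 32-byte slot(s), which is what the
 * extension handling in afs_dir_iterate_block() below walks over.
 */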
2433 +struct afs_dir_lookup_cookie {
2434 +       afs_fid_t       fid;
2435 +       const char      *name;
2436 +       size_t          nlen;
2437 +       int             found;
2438 +};
2439 +
2440 +/*****************************************************************************/
2441 +/*
2442 + * check that a directory page is valid
2443 + */
2444 +static inline void afs_dir_check_page(struct inode *dir, struct page *page)
2445 +{
2446 +       afs_dir_page_t *dbuf;
2447 +       loff_t latter;
2448 +       int tmp, qty;
2449 +
2450 +#if 0
2451 +       /* check the page count */
2452 +       qty = desc.size/sizeof(dbuf->blocks[0]);
2453 +       if (qty==0)
2454 +               goto error;
2455 +
2456 +       if (page->index==0 && qty!=ntohs(dbuf->blocks[0].pagehdr.npages)) {
2457 +               printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n",
2458 +                      __FUNCTION__,dir->i_ino,qty,ntohs(dbuf->blocks[0].pagehdr.npages));
2459 +               goto error;
2460 +       }
2461 +#endif
2462 +
2463 +       /* determine how many magic numbers there should be in this page */
2464 +       latter = dir->i_size - (page->index << PAGE_CACHE_SHIFT);
2465 +       if (latter >= PAGE_SIZE)
2466 +               qty = PAGE_SIZE;
2467 +       else
2468 +               qty = latter;
2469 +       qty /= sizeof(afs_dir_block_t);
2470 +
2471 +       /* check them */
2472 +       dbuf = page_address(page);
2473 +       for (tmp=0; tmp<qty; tmp++) {
2474 +               if (dbuf->blocks[tmp].pagehdr.magic != AFS_DIR_MAGIC) {
2475 +                       printk("kAFS: %s(%lu): bad magic %d/%d is %04hx\n",
2476 +                              __FUNCTION__,dir->i_ino,tmp,
2477 +                              qty,ntohs(dbuf->blocks[tmp].pagehdr.magic));
2478 +                       goto error;
2479 +               }
2480 +       }
2481 +
2482 +       SetPageChecked(page);
2483 +       return;
2484 +
2485 + error:
2486 +       SetPageChecked(page);
2487 +       SetPageError(page);
2488 +
2489 +} /* end afs_dir_check_page() */
2490 +
2491 +/*****************************************************************************/
2492 +/*
2493 + * discard a page cached in the pagecache
2494 + */
2495 +static inline void afs_dir_put_page(struct page *page)
2496 +{
2497 +       kunmap(page);
2498 +       page_cache_release(page);
2499 +
2500 +} /* end afs_dir_put_page() */
2501 +
2502 +/*****************************************************************************/
2503 +/*
2504 + * get a page into the pagecache
2505 + */
2506 +static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
2507 +{
2508 +       struct page *page;
2509 +
2510 +       _enter("{%lu},%lu",dir->i_ino,index);
2511 +
2512 +       page = read_cache_page(dir->i_mapping,index,
2513 +                              (filler_t*)dir->i_mapping->a_ops->readpage,NULL);
2514 +       if (!IS_ERR(page)) {
2515 +               wait_on_page_locked(page);
2516 +               kmap(page);
2517 +               if (!PageUptodate(page))
2518 +                       goto fail;
2519 +               if (!PageChecked(page))
2520 +                       afs_dir_check_page(dir,page);
2521 +               if (PageError(page))
2522 +                       goto fail;
2523 +       }
2524 +       return page;
2525 +
2526 + fail:
2527 +       afs_dir_put_page(page);
2528 +       return ERR_PTR(-EIO);
2529 +} /* end afs_dir_get_page() */
2530 +
2531 +/*****************************************************************************/
2532 +/*
2533 + * open an AFS directory file
2534 + */
2535 +static int afs_dir_open(struct inode *inode, struct file *file)
2536 +{
2537 +       _enter("{%lu}",inode->i_ino);
2538 +
2539 +       if (sizeof(afs_dir_block_t) != 2048) BUG();
2540 +       if (sizeof(afs_dirent_t) != 32) BUG();
2541 +
2542 +       if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED)
2543 +               return -ENOENT;
2544 +
2545 +       _leave(" = 0");
2546 +       return 0;
2547 +
2548 +} /* end afs_dir_open() */
2549 +
2550 +/*****************************************************************************/
2551 +/*
2552 + * deal with one block in an AFS directory
2553 + */
2554 +static int afs_dir_iterate_block(unsigned *fpos,
2555 +                                   afs_dir_block_t *block,
2556 +                                   unsigned blkoff,
2557 +                                   void *cookie,
2558 +                                   filldir_t filldir)
2559 +{
2560 +       afs_dirent_t *dire;
2561 +       unsigned offset, next, curr;
2562 +       size_t nlen;
2563 +       int tmp, ret;
2564 +
2565 +       _enter("%u,%x,%p,,",*fpos,blkoff,block);
2566 +
2567 +       curr = (*fpos - blkoff) / sizeof(afs_dirent_t);
2568 +
2569 +       /* walk through the block, an entry at a time */
2570 +       for (offset = AFS_DIRENT_PER_BLOCK - block->pagehdr.nentries;
2571 +            offset < AFS_DIRENT_PER_BLOCK;
2572 +            offset = next
2573 +            ) {
2574 +               next = offset + 1;
2575 +
2576 +               /* skip entries marked unused in the bitmap */
2577 +               if (!(block->pagehdr.bitmap[offset/8] & (1 << (offset % 8)))) {
2578 +                       _debug("ENT[%u.%u]: unused\n",blkoff/sizeof(afs_dir_block_t),offset);
2579 +                       if (offset>=curr)
2580 +                               *fpos = blkoff + next * sizeof(afs_dirent_t);
2581 +                       continue;
2582 +               }
2583 +
2584 +               /* got a valid entry */
2585 +               dire = &block->dirents[offset];
2586 +               nlen = strnlen(dire->name,sizeof(*block) - offset*sizeof(afs_dirent_t));
2587 +
2588 +               _debug("ENT[%u.%u]: %s %u \"%.*s\"\n",
2589 +                      blkoff/sizeof(afs_dir_block_t),offset,
2590 +                      offset<curr ? "skip" : "fill",
2591 +                      nlen,nlen,dire->name);
2592 +
2593 +               /* work out where the next possible entry is */
2594 +               for (tmp=nlen; tmp>15; tmp-=sizeof(afs_dirent_t)) {
2595 +                       if (next>=AFS_DIRENT_PER_BLOCK) {
2596 +                               _debug("ENT[%u.%u]:"
2597 +                                      " %u travelled beyond end dir block (len %u/%u)\n",
2598 +                                      blkoff/sizeof(afs_dir_block_t),offset,next,tmp,nlen);
2599 +                               return -EIO;
2600 +                       }
2601 +                       if (!(block->pagehdr.bitmap[next/8] & (1 << (next % 8)))) {
2602 +                               _debug("ENT[%u.%u]: %u unmarked extension (len %u/%u)\n",
2603 +                                      blkoff/sizeof(afs_dir_block_t),offset,next,tmp,nlen);
2604 +                               return -EIO;
2605 +                       }
2606 +
2607 +                       _debug("ENT[%u.%u]: ext %u/%u\n",
2608 +                              blkoff/sizeof(afs_dir_block_t),next,tmp,nlen);
2609 +                       next++;
2610 +               }
2611 +
2612 +               /* skip if starts before the current position */
2613 +               if (offset<curr)
2614 +                       continue;
2615 +
2616 +               /* found the next entry */
2617 +               ret = filldir(cookie,
2618 +                             dire->name,
2619 +                             nlen,
2620 +                             blkoff + offset * sizeof(afs_dirent_t),
2621 +                             ntohl(dire->vnode),
2622 +                             filldir==afs_dir_lookup_filldir ? dire->unique : DT_UNKNOWN);
2623 +               if (ret<0) {
2624 +                       _leave(" = 0 [full]");
2625 +                       return 0;
2626 +               }
2627 +
2628 +               *fpos = blkoff + next * sizeof(afs_dirent_t);
2629 +       }
2630 +
2631 +       _leave(" = 1 [more]");
2632 +       return 1;
2633 +} /* end afs_dir_iterate_block() */
2634 +
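/*
 * [Editor's sketch, not part of the patch] The extension loop above
 * ("for (tmp=nlen; tmp>15; ...)") consumes one extra 32-byte dirent slot
 * per further chunk of name.  The same arithmetic, worked as a
 * hypothetical helper:
 */
static unsigned example_slots_for_name(size_t nlen)
{
	unsigned slots = 1;	/* the base dirent holding name[16] */
	int tmp;

	for (tmp = nlen; tmp > 15; tmp -= sizeof(afs_dirent_t))
		slots++;	/* name spills into the next slot */

	return slots;
}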
2635 +/*****************************************************************************/
2636 +/*
2637 + * read an AFS directory
2638 + */
2639 +static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, filldir_t filldir)
2640 +{
2641 +       afs_dir_block_t *dblock;
2642 +       afs_dir_page_t *dbuf;
2643 +       struct page *page;
2644 +       unsigned blkoff, limit;
2645 +       int ret;
2646 +
2647 +       _enter("{%lu},%u,,",dir->i_ino,*fpos);
2648 +
2649 +       if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
2650 +               _leave(" = -ESTALE");
2651 +               return -ESTALE;
2652 +       }
2653 +
2654 +       /* round the file position up to the next entry boundary */
2655 +       *fpos += sizeof(afs_dirent_t) - 1;
2656 +       *fpos &= ~(sizeof(afs_dirent_t) - 1);
2657 +
2658 +       /* walk through the blocks in sequence */
2659 +       ret = 0;
2660 +       while (*fpos < dir->i_size) {
2661 +               blkoff = *fpos & ~(sizeof(afs_dir_block_t) - 1);
2662 +
2663 +               /* fetch the appropriate page from the directory */
2664 +               page = afs_dir_get_page(dir,blkoff/PAGE_SIZE);
2665 +               if (IS_ERR(page)) {
2666 +                       ret = PTR_ERR(page);
2667 +                       break;
2668 +               }
2669 +
2670 +               limit = blkoff & ~(PAGE_SIZE-1);
2671 +
2672 +               dbuf = page_address(page);
2673 +
2674 +               /* deal with the individual blocks stashed on this page */
2675 +               do {
2676 +                       dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) / sizeof(afs_dir_block_t)];
2677 +                       ret = afs_dir_iterate_block(fpos,dblock,blkoff,cookie,filldir);
2678 +                       if (ret!=1) {
2679 +                               afs_dir_put_page(page);
2680 +                               goto out;
2681 +                       }
2682 +
2683 +                       blkoff += sizeof(afs_dir_block_t);
2684 +
2685 +               } while (*fpos < dir->i_size && blkoff < limit);
2686 +
2687 +               afs_dir_put_page(page);
2688 +               ret = 0;
2689 +       }
2690 +
2691 + out:
2692 +       _leave(" = %d",ret);
2693 +       return ret;
2694 +} /* end afs_dir_iterate() */
2695 +
2696 +/*****************************************************************************/
2697 +/*
2698 + * read an AFS directory
2699 + */
2700 +static int afs_dir_readdir(struct file *file, void *cookie, filldir_t filldir)
2701 +{
2702 +       unsigned fpos;
2703 +       int ret;
2704 +
2705 +       _enter("{%Ld,{%lu}}",file->f_pos,file->f_dentry->d_inode->i_ino);
2706 +
2707 +       fpos = file->f_pos;
2708 +       ret = afs_dir_iterate(file->f_dentry->d_inode,&fpos,cookie,filldir);
2709 +       file->f_pos = fpos;
2710 +
2711 +       _leave(" = %d",ret);
2712 +       return ret;
2713 +} /* end afs_dir_readdir() */
2714 +
2715 +/*****************************************************************************/
2716 +/*
2717 + * search the directory for a name
2718 + * - if afs_dir_iterate_block() spots this function, it'll pass the FID uniquifier through dtype
2719 + */
2720 +static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, loff_t fpos,
2721 +                                 ino_t ino, unsigned dtype)
2722 +{
2723 +       struct afs_dir_lookup_cookie *cookie = _cookie;
2724 +
2725 +       _enter("{%s,%u},%s,%u,,%lu,%u",cookie->name,cookie->nlen,name,nlen,ino,ntohl(dtype));
2726 +
2727 +       if (cookie->nlen != nlen || memcmp(cookie->name,name,nlen)!=0) {
2728 +               _leave(" = 0 [no]");
2729 +               return 0;
2730 +       }
2731 +
2732 +       cookie->fid.vnode = ino;
2733 +       cookie->fid.unique = ntohl(dtype);
2734 +       cookie->found = 1;
2735 +
2736 +       _leave(" = -1 [found]");
2737 +       return -1;
2738 +} /* end afs_dir_lookup_filldir() */
2739 +
2740 +/*****************************************************************************/
2741 +/*
2742 + * look up an entry in a directory
2743 + */
2744 +static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry)
2745 +{
2746 +       struct afs_dir_lookup_cookie cookie;
2747 +       struct afs_super_info *as;
2748 +       struct inode *inode;
2749 +       afs_vnode_t *vnode;
2750 +       unsigned fpos;
2751 +       int ret;
2752 +
2753 +       _enter("{%lu},{%s}",dir->i_ino,dentry->d_name.name);
2754 +
2755 +       /* insanity checks first */
2756 +       if (sizeof(afs_dir_block_t) != 2048) BUG();
2757 +       if (sizeof(afs_dirent_t) != 32) BUG();
2758 +
2759 +       if (dentry->d_name.len > 255) {
2760 +               _leave(" = -ENAMETOOLONG");
2761 +               return ERR_PTR(-ENAMETOOLONG);
2762 +       }
2763 +
2764 +       vnode = AFS_FS_I(dir);
2765 +       if (vnode->flags & AFS_VNODE_DELETED) {
2766 +               _leave(" = -ESTALE");
2767 +               return ERR_PTR(-ESTALE);
2768 +       }
2769 +
2770 +       as = dir->i_sb->u.generic_sbp;
2771 +
2772 +       /* search the directory */
2773 +       cookie.name     = dentry->d_name.name;
2774 +       cookie.nlen     = dentry->d_name.len;
2775 +       cookie.fid.vid  = as->volume->vid;
2776 +       cookie.found    = 0;
2777 +
2778 +       fpos = 0;
2779 +       ret = afs_dir_iterate(dir,&fpos,&cookie,afs_dir_lookup_filldir);
2780 +       if (ret<0) {
2781 +               _leave(" = %d",ret);
2782 +               return ERR_PTR(ret);
2783 +       }
2784 +
2785 +       ret = -ENOENT;
2786 +       if (!cookie.found) {
2787 +               _leave(" = %d",ret);
2788 +               return ERR_PTR(ret);
2789 +       }
2790 +
2791 +       /* instantiate the dentry */
2792 +       ret = afs_iget(dir->i_sb,&cookie.fid,&inode);
2793 +       if (ret<0) {
2794 +               _leave(" = %d",ret);
2795 +               return ERR_PTR(ret);
2796 +       }
2797 +
2798 +       dentry->d_op = &afs_fs_dentry_operations;
2799 +       dentry->d_fsdata = (void*) (unsigned) vnode->status.version;
2800 +
2801 +       d_add(dentry,inode);
2802 +       _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%lu }",
2803 +              cookie.fid.vnode,
2804 +              cookie.fid.unique,
2805 +              dentry->d_inode->i_ino,
2806 +              dentry->d_inode->i_version);
2807 +
2808 +       return NULL;
2809 +} /* end afs_dir_lookup() */
2810 +
2811 +/*****************************************************************************/
2812 +/*
2813 + * check that a dentry lookup hit has found a valid entry
2814 + * - NOTE! the hit can be a negative hit too, so we can't assume we have an inode
2815 + * (derived from nfs_lookup_revalidate)
2816 + */
2817 +static int afs_d_revalidate(struct dentry *dentry, int flags)
2818 +{
2819 +       struct afs_dir_lookup_cookie cookie;
2820 +       struct dentry *parent;
2821 +       struct inode *inode, *dir;
2822 +       unsigned fpos;
2823 +       int ret;
2824 +
2825 +       _enter("%s,%x",dentry->d_name.name,flags);
2826 +
2827 +       /* lock down the parent dentry so we can peer at it */
2828 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
2829 +       read_lock(&dparent_lock);
2830 +       parent = dget(dentry->d_parent);
2831 +       read_unlock(&dparent_lock);
2832 +#else
2833 +       lock_kernel();
2834 +       parent = dget(dentry->d_parent);
2835 +       unlock_kernel();
2836 +#endif
2837 +
2838 +       dir = parent->d_inode;
2839 +       inode = dentry->d_inode;
2840 +
2841 +       /* handle a negative inode */
2842 +       if (!inode)
2843 +               goto out_bad;
2844 +
2845 +       /* handle a bad inode */
2846 +       if (is_bad_inode(inode)) {
2847 +               printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n",
2848 +                      dentry->d_parent->d_name.name,dentry->d_name.name);
2849 +               goto out_bad;
2850 +       }
2851 +
2852 +       /* force a full lookup if the parent directory changed since the server was last consulted
2853 +        * - otherwise this inode must still exist, even if the inode details themselves have
2854 +        *   changed
2855 +        */
2856 +       if (AFS_FS_I(dir)->flags & AFS_VNODE_CHANGED)
2857 +               afs_vnode_fetch_status(AFS_FS_I(dir));
2858 +
2859 +       if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
2860 +               _debug("%s: parent dir deleted",dentry->d_name.name);
2861 +               goto out_bad;
2862 +       }
2863 +
2864 +       if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) {
2865 +               _debug("%s: file already deleted",dentry->d_name.name);
2866 +               goto out_bad;
2867 +       }
2868 +
2869 +       if ((unsigned)dentry->d_fsdata != (unsigned)AFS_FS_I(dir)->status.version) {
2870 +               _debug("%s: parent changed %u -> %u",
2871 +                      dentry->d_name.name,
2872 +                      (unsigned)dentry->d_fsdata,
2873 +                      (unsigned)AFS_FS_I(dir)->status.version);
2874 +
2875 +               /* search the directory for this vnode */
2876 +               cookie.name     = dentry->d_name.name;
2877 +               cookie.nlen     = dentry->d_name.len;
2878 +               cookie.fid.vid  = AFS_FS_I(inode)->volume->vid;
2879 +               cookie.found    = 0;
2880 +
2881 +               fpos = 0;
2882 +               ret = afs_dir_iterate(dir,&fpos,&cookie,afs_dir_lookup_filldir);
2883 +               if (ret<0) {
2884 +                       _debug("failed to iterate dir %s: %d",parent->d_name.name,ret);
2885 +                       goto out_bad;
2886 +               }
2887 +
2888 +               if (!cookie.found) {
2889 +                       _debug("%s: dirent not found",dentry->d_name.name);
2890 +                       goto not_found;
2891 +               }
2892 +
2893 +               /* if the vnode ID has changed, then the dirent points to a different file */
2894 +               if (cookie.fid.vnode!=AFS_FS_I(inode)->fid.vnode) {
2895 +                       _debug("%s: dirent changed",dentry->d_name.name);
2896 +                       goto not_found;
2897 +               }
2898 +
2899 +               /* if the vnode ID uniquifier has changed, then the file has been deleted */
2900 +               if (cookie.fid.unique!=AFS_FS_I(inode)->fid.unique) {
2901 +                       _debug("%s: file deleted (uq %u -> %u I:%lu)",
2902 +                              dentry->d_name.name,
2903 +                              cookie.fid.unique,
2904 +                              AFS_FS_I(inode)->fid.unique,
2905 +                              inode->i_version);
2906 +                       spin_lock(&AFS_FS_I(inode)->lock);
2907 +                       AFS_FS_I(inode)->flags |= AFS_VNODE_DELETED;
2908 +                       spin_unlock(&AFS_FS_I(inode)->lock);
2909 +                       invalidate_inode_pages(inode);
2910 +                       goto out_bad;
2911 +               }
2912 +
2913 +               dentry->d_fsdata = (void*) (unsigned) AFS_FS_I(dir)->status.version;
2914 +       }
2915 +
2916 + out_valid:
2917 +       dput(parent);
2918 +       _leave(" = 1 [valid]");
2919 +       return 1;
2920 +
2921 +       /* the dirent, if it exists, now points to a different vnode */
2922 + not_found:
2923 +       dentry->d_flags |= DCACHE_NFSFS_RENAMED;
2924 +
2925 + out_bad:
2926 +       if (inode) {
2927 +               /* don't unhash if we have submounts */
2928 +               if (have_submounts(dentry))
2929 +                       goto out_valid;
2930 +       }
2931 +
2932 +       shrink_dcache_parent(dentry);
2933 +
2934 +       _debug("dropping dentry %s/%s",dentry->d_parent->d_name.name,dentry->d_name.name);
2935 +       d_drop(dentry);
2936 +
2937 +       dput(parent);
2938 +
2939 +       _leave(" = 0 [bad]");
2940 +       return 0;
2941 +} /* end afs_d_revalidate() */
2942 +
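/*
 * [Editor's note, not part of the patch] The revalidation above keys off
 * dentry->d_fsdata, which afs_dir_lookup() set to the parent directory's
 * data version: while the parent's version still matches, the name cannot
 * have changed on the server, so the full directory re-scan is skipped.
 * Sketch of that cheap first test, with a hypothetical helper name:
 */
static int example_dentry_is_current(struct dentry *dentry, struct inode *dir)
{
	/* has the parent directory changed at all since the lookup? */
	return (unsigned) dentry->d_fsdata ==
	       (unsigned) AFS_FS_I(dir)->status.version;
}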
2943 +/*****************************************************************************/
2944 +/*
2945 + * allow the VFS to enquire as to whether a dentry should be unhashed (mustn't sleep)
2946 + * - called from dput() when d_count is going to 0.
2947 + * - return 1 to request dentry be unhashed, 0 otherwise
2948 + */
2949 +static int afs_d_delete(struct dentry *dentry)
2950 +{
2951 +       _enter("%s",dentry->d_name.name);
2952 +
2953 +       if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
2954 +               return 1;
2955 +
2956 +       if (dentry->d_inode) {
2957 +               if (AFS_FS_I(dentry->d_inode)->flags & AFS_VNODE_DELETED)
2958 +                       goto zap;
2959 +       }
2960 +
2961 +       _leave(" = 0 [keep]");
2962 +       return 0;
2963 +
2964 + zap:
2965 +       _leave(" = 1 [zap]");
2966 +       return 1;
2967 +} /* end afs_d_delete() */
2968 diff -urNp linux-5240/fs/afs/errors.h linux-5250/fs/afs/errors.h
2969 --- linux-5240/fs/afs/errors.h  1970-01-01 01:00:00.000000000 +0100
2970 +++ linux-5250/fs/afs/errors.h  
2971 @@ -0,0 +1,34 @@
2972 +/* errors.h: AFS abort/error codes
2973 + *
2974 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
2975 + * Written by David Howells (dhowells@redhat.com)
2976 + *
2977 + * This program is free software; you can redistribute it and/or
2978 + * modify it under the terms of the GNU General Public License
2979 + * as published by the Free Software Foundation; either version
2980 + * 2 of the License, or (at your option) any later version.
2981 + */
2982 +
2983 +#ifndef _H_DB712916_5113_11D6_9A6D_0002B3163499
2984 +#define _H_DB712916_5113_11D6_9A6D_0002B3163499
2985 +
2986 +#include "types.h"
2987 +
2988 +/* file server abort codes */
2989 +typedef enum {
2990 +       VSALVAGE        = 101,  /* volume needs salvaging */
2991 +       VNOVNODE        = 102,  /* no such file/dir (vnode) */
2992 +       VNOVOL          = 103,  /* no such volume or volume unavailable */
2993 +       VVOLEXISTS      = 104,  /* volume name already exists */
2994 +       VNOSERVICE      = 105,  /* volume not currently in service */
2995 +       VOFFLINE        = 106,  /* volume is currently offline (more info available [VVL-spec]) */
2996 +       VONLINE         = 107,  /* volume is already online */
2997 +       VDISKFULL       = 108,  /* disk partition is full */
2998 +       VOVERQUOTA      = 109,  /* volume's maximum quota exceeded */
2999 +       VBUSY           = 110,  /* volume is temporarily unavailable */
3000 +       VMOVED          = 111,  /* volume moved to new server - ask this FS where */
3001 +} afs_rxfs_abort_t;
3002 +
3003 +extern int afs_abort_to_error(int abortcode);
3004 +
3005 +#endif /* _H_DB712916_5113_11D6_9A6D_0002B3163499 */
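errors.h only declares the translator; the switch that turns these abort codes into Linux errnos lives in another part of the patch not reproduced here. A hedged sketch of such a mapping, with errno choices that are assumptions rather than the patch's own table:

int afs_abort_to_error(int abortcode)
{
	switch (abortcode) {
	case VSALVAGE:		return -EIO;		/* volume damaged, needs salvaging */
	case VNOVNODE:		return -ENOENT;		/* vnode no longer exists */
	case VNOVOL:
	case VOFFLINE:		return -ENODEV;		/* volume missing or offline */
	case VDISKFULL:		return -ENOSPC;		/* partition full */
	case VOVERQUOTA:	return -EDQUOT;		/* quota exceeded */
	case VBUSY:		return -EBUSY;		/* volume temporarily unavailable */
	default:		return -EREMOTEIO;	/* unrecognised abort code */
	}
}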
3006 diff -urNp linux-5240/fs/afs/file.c linux-5250/fs/afs/file.c
3007 --- linux-5240/fs/afs/file.c    1970-01-01 01:00:00.000000000 +0100
3008 +++ linux-5250/fs/afs/file.c    
3009 @@ -0,0 +1,143 @@
3010 +/* file.c: AFS filesystem file handling
3011 + *
3012 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
3013 + * Written by David Howells (dhowells@redhat.com)
3014 + *
3015 + * This program is free software; you can redistribute it and/or
3016 + * modify it under the terms of the GNU General Public License
3017 + * as published by the Free Software Foundation; either version
3018 + * 2 of the License, or (at your option) any later version.
3019 + */
3020 +
3021 +#include <linux/kernel.h>
3022 +#include <linux/module.h>
3023 +#include <linux/init.h>
3024 +#include <linux/sched.h>
3025 +#include <linux/slab.h>
3026 +#include <linux/fs.h>
3027 +#include <linux/pagemap.h>
3028 +#include "volume.h"
3029 +#include "vnode.h"
3030 +#include <rxrpc/call.h>
3031 +#include "internal.h"
3032 +
3033 +//static int afs_file_open(struct inode *inode, struct file *file);
3034 +//static int afs_file_release(struct inode *inode, struct file *file);
3035 +
3036 +static int afs_file_readpage(struct file *file, struct page *page);
3037 +
3038 +//static ssize_t afs_file_read(struct file *file, char *buf, size_t size, loff_t *off);
3039 +
3040 +static ssize_t afs_file_write(struct file *file, const char *buf, size_t size, loff_t *off);
3041 +
3042 +struct inode_operations afs_file_inode_operations = {
3043 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
3044 +       getattr:        afs_inode_getattr,
3045 +#else
3046 +       revalidate:     afs_inode_revalidate,
3047 +#endif
3048 +};
3049 +
3050 +struct file_operations afs_file_file_operations = {
3051 +//     open:           afs_file_open,
3052 +//     release:        afs_file_release,
3053 +       read:           generic_file_read, //afs_file_read,
3054 +       write:          afs_file_write,
3055 +       mmap:           generic_file_mmap,
3056 +//     fsync:          afs_file_fsync,
3057 +};
3058 +
3059 +struct address_space_operations afs_fs_aops = {
3060 +       readpage:       afs_file_readpage,
3061 +};
3062 +
3063 +/*****************************************************************************/
3064 +/*
3065 + * AFS file read
3066 + */
3067 +#if 0
3068 +static ssize_t afs_file_read(struct file *file, char *buf, size_t size, loff_t *off)
3069 +{
3070 +       afs_vnode_t *vnode;
3071 +
3072 +       vnode = AFS_FS_I(file->f_dentry->d_inode);
3073 +       if (vnode->flags & AFS_VNODE_DELETED)
3074 +               return -ESTALE;
3075 +
3076 +       return -EIO;
3077 +} /* end afs_file_read() */
3078 +#endif
3079 +
3080 +/*****************************************************************************/
3081 +/*
3082 + * AFS file write
3083 + */
3084 +static ssize_t afs_file_write(struct file *file, const char *buf, size_t size, loff_t *off)
3085 +{
3086 +       afs_vnode_t *vnode;
3087 +
3088 +       vnode = AFS_FS_I(file->f_dentry->d_inode);
3089 +       if (vnode->flags & AFS_VNODE_DELETED)
3090 +               return -ESTALE;
3091 +
3092 +       return -EIO;
3093 +} /* end afs_file_write() */
3094 +
3095 +/*****************************************************************************/
3096 +/*
3097 + * AFS read page from file (or symlink)
3098 + */
3099 +static int afs_file_readpage(struct file *file, struct page *page)
3100 +{
3101 +       struct afs_rxfs_fetch_descriptor desc;
3102 +       struct inode *inode;
3103 +       afs_vnode_t *vnode;
3104 +       int ret;
3105 +
3106 +       inode = page->mapping->host;
3107 +
3108 +       _enter("{%lu},{%lu}",inode->i_ino,page->index);
3109 +
3110 +       vnode = AFS_FS_I(inode);
3111 +
3112 +       if (!PageLocked(page))
3113 +               PAGE_BUG(page);
3114 +
3115 +       ret = -ESTALE;
3116 +       if (vnode->flags & AFS_VNODE_DELETED)
3117 +               goto error;
3118 +
3119 +       /* work out how much to get and from where */
3120 +       desc.fid        = vnode->fid;
3121 +       desc.offset     = page->index << PAGE_CACHE_SHIFT;
3122 +       desc.size       = min((size_t)(inode->i_size - desc.offset),(size_t)PAGE_SIZE);
3123 +       desc.buffer     = kmap(page);
3124 +
3125 +       clear_page(desc.buffer);
3126 +
3127 +       /* read the contents of the file from the server into the page */
3128 +       ret = afs_vnode_fetch_data(vnode,&desc);
3129 +       kunmap(page);
3130 +       if (ret<0) {
3131 +               if (ret==-ENOENT) {
3132 +                       _debug("got NOENT from server - marking file deleted and stale");
3133 +                       vnode->flags |= AFS_VNODE_DELETED;
3134 +                       ret = -ESTALE;
3135 +               }
3136 +               goto error;
3137 +       }
3138 +
3139 +       SetPageUptodate(page);
3140 +       unlock_page(page);
3141 +
3142 +       _leave(" = 0");
3143 +       return 0;
3144 +
3145 + error:
3146 +       SetPageError(page);
3147 +       unlock_page(page);
3148 +
3149 +       _leave(" = %d",ret);
3150 +       return ret;
3151 +
3152 +} /* end afs_file_readpage() */
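The extent calculation in afs_file_readpage() deserves a note: page->index counts whole pages, so the byte offset is the index shifted by PAGE_CACHE_SHIFT, the request length is capped by how much of the file remains, and clear_page() ensures the unread tail of a short final page reads back as zeroes. A small worked sketch of that arithmetic (the helper name and the loff_t usage are illustrative, not part of the patch):

/* fetch-extent arithmetic used by afs_file_readpage(), spelled out */
static void afs_example_fetch_extent(unsigned long page_index, loff_t i_size,
				     loff_t *offset, size_t *size)
{
	*offset = (loff_t) page_index << PAGE_CACHE_SHIFT;		/* byte position of the page */
	*size   = min((size_t)(i_size - *offset), (size_t) PAGE_SIZE);	/* remaining bytes, at most one page */

	/* e.g. a 10000-byte file with 4096-byte pages:
	 *   page 0 -> offset 0,    size 4096
	 *   page 1 -> offset 4096, size 4096
	 *   page 2 -> offset 8192, size 1808 (bytes 1808..4095 stay zeroed by clear_page())
	 */
}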
3153 diff -urNp linux-5240/fs/afs/fsclient.c linux-5250/fs/afs/fsclient.c
3154 --- linux-5240/fs/afs/fsclient.c        1970-01-01 01:00:00.000000000 +0100
3155 +++ linux-5250/fs/afs/fsclient.c        
3156 @@ -0,0 +1,816 @@
3157 +/* fsclient.c: AFS File Server client stubs
3158 + *
3159 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
3160 + * Written by David Howells (dhowells@redhat.com)
3161 + *
3162 + * This program is free software; you can redistribute it and/or
3163 + * modify it under the terms of the GNU General Public License
3164 + * as published by the Free Software Foundation; either version
3165 + * 2 of the License, or (at your option) any later version.
3166 + */
3167 +
3168 +#include <linux/init.h>
3169 +#include <linux/sched.h>
3170 +#include <rxrpc/rxrpc.h>
3171 +#include <rxrpc/transport.h>
3172 +#include <rxrpc/connection.h>
3173 +#include <rxrpc/call.h>
3174 +#include "fsclient.h"
3175 +#include "cmservice.h"
3176 +#include "vnode.h"
3177 +#include "server.h"
3178 +#include "errors.h"
3179 +#include "internal.h"
3180 +
3181 +#define FSFETCHSTATUS          132     /* AFS Fetch file status */
3182 +#define FSFETCHDATA            130     /* AFS Fetch file data */
3183 +#define FSGIVEUPCALLBACKS      147     /* AFS Discard server callback promises */
3184 +#define FSGETVOLUMEINFO        148     /* AFS Get root volume information */
3185 +#define FSGETROOTVOLUME        151     /* AFS Get root volume name */
3186 +#define FSLOOKUP               161     /* AFS lookup file in directory */
3187 +
3188 +/*****************************************************************************/
3189 +/*
3190 + * map afs abort codes to/from Linux error codes
3191 + * - called with call->lock held
3192 + */
3193 +static void afs_rxfs_aemap(struct rxrpc_call *call)
3194 +{
3195 +       switch (call->app_err_state) {
3196 +       case RXRPC_ESTATE_LOCAL_ABORT:
3197 +               call->app_abort_code = -call->app_errno;
3198 +               break;
3199 +       case RXRPC_ESTATE_PEER_ABORT:
3200 +               call->app_errno = afs_abort_to_error(call->app_abort_code);
3201 +               break;
3202 +       default:
3203 +               break;
3204 +       }
3205 +} /* end afs_rxfs_aemap() */
3206 +
3207 +/*****************************************************************************/
3208 +/*
3209 + * get the root volume name from a fileserver
3210 + * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
3211 + */
3212 +#if 0
3213 +int afs_rxfs_get_root_volume(afs_server_t *server, char *buf, size_t *buflen)
3214 +{
3215 +       DECLARE_WAITQUEUE(myself,current);
3216 +
3217 +       struct rxrpc_connection *conn;
3218 +       struct rxrpc_call *call;
3219 +       struct iovec piov[2];
3220 +       size_t sent;
3221 +       int ret;
3222 +       u32 param[1];
3223 +
3224 +       kenter("%p,%p,%u",server,buf,*buflen);
3225 +
3226 +       /* get hold of the fileserver connection */
3227 +       ret = afs_server_get_fsconn(server,&conn);
3228 +       if (ret<0)
3229 +               goto out;
3230 +
3231 +       /* create a call through that connection */
3232 +       ret = rxrpc_create_call(conn,NULL,NULL,afs_rxfs_aemap,&call);
3233 +       if (ret<0) {
3234 +               printk("kAFS: Unable to create call: %d\n",ret);
3235 +               goto out_put_conn;
3236 +       }
3237 +       call->app_opcode = FSGETROOTVOLUME;
3238 +
3239 +       /* we want to get event notifications from the call */
3240 +       add_wait_queue(&call->waitq,&myself);
3241 +
3242 +       /* marshall the parameters */
3243 +       param[0] = htonl(FSGETROOTVOLUME);
3244 +
3245 +       piov[0].iov_len = sizeof(param);
3246 +       piov[0].iov_base = param;
3247 +
3248 +       /* send the parameters to the server */
3249 +       ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3250 +       if (ret<0)
3251 +               goto abort;
3252 +
3253 +       /* wait for the reply to completely arrive */
3254 +       for (;;) {
3255 +               set_current_state(TASK_INTERRUPTIBLE);
3256 +               if (call->app_call_state!=RXRPC_CSTATE_CLNT_RCV_REPLY ||
3257 +                   signal_pending(current))
3258 +                       break;
3259 +               schedule();
3260 +       }
3261 +       set_current_state(TASK_RUNNING);
3262 +
3263 +       ret = -EINTR;
3264 +       if (signal_pending(current))
3265 +               goto abort;
3266 +
3267 +       switch (call->app_call_state) {
3268 +       case RXRPC_CSTATE_ERROR:
3269 +               ret = call->app_errno;
3270 +               kdebug("Got Error: %d",ret);
3271 +               goto out_unwait;
3272 +
3273 +       case RXRPC_CSTATE_CLNT_GOT_REPLY:
3274 +               /* read the reply */
3275 +               kdebug("Got Reply: qty=%d",call->app_ready_qty);
3276 +
3277 +               ret = -EBADMSG;
3278 +               if (call->app_ready_qty <= 4)
3279 +                       goto abort;
3280 +
3281 +               ret = rxrpc_call_read_data(call,NULL,call->app_ready_qty,0);
3282 +               if (ret<0)
3283 +                       goto abort;
3284 +
3285 +#if 0
3286 +               /* unmarshall the reply */
3287 +               bp = buffer;
3288 +               for (loop=0; loop<65; loop++)
3289 +                       entry->name[loop] = ntohl(*bp++);
3290 +               entry->name[64] = 0;
3291 +
3292 +               entry->type = ntohl(*bp++);
3293 +               entry->num_servers = ntohl(*bp++);
3294 +
3295 +               for (loop=0; loop<8; loop++)
3296 +                       entry->servers[loop].addr.s_addr = *bp++;
3297 +
3298 +               for (loop=0; loop<8; loop++)
3299 +                       entry->servers[loop].partition = ntohl(*bp++);
3300 +
3301 +               for (loop=0; loop<8; loop++)
3302 +                       entry->servers[loop].flags = ntohl(*bp++);
3303 +
3304 +               for (loop=0; loop<3; loop++)
3305 +                       entry->volume_ids[loop] = ntohl(*bp++);
3306 +
3307 +               entry->clone_id = ntohl(*bp++);
3308 +               entry->flags = ntohl(*bp);
3309 +#endif
3310 +
3311 +               /* success */
3312 +               ret = 0;
3313 +               goto out_unwait;
3314 +
3315 +       default:
3316 +               BUG();
3317 +       }
3318 +
3319 + abort:
3320 +       set_current_state(TASK_UNINTERRUPTIBLE);
3321 +       rxrpc_call_abort(call,ret);
3322 +       schedule();
3323 + out_unwait:
3324 +       set_current_state(TASK_RUNNING);
3325 +       remove_wait_queue(&call->waitq,&myself);
3326 +       rxrpc_put_call(call);
3327 + out_put_conn:
3328 +       afs_server_release_fsconn(server,conn);
3329 + out:
3330 +       kleave("");
3331 +       return ret;
3332 +} /* end afs_rxfs_get_root_volume() */
3333 +#endif
3334 +
3335 +/*****************************************************************************/
3336 +/*
3337 + * get information about a volume
3338 + */
3339 +#if 0
3340 +int afs_rxfs_get_volume_info(afs_server_t *server,
3341 +                            const char *name,
3342 +                            afs_volume_info_t *vinfo)
3343 +{
3344 +       DECLARE_WAITQUEUE(myself,current);
3345 +
3346 +       struct rxrpc_connection *conn;
3347 +       struct rxrpc_call *call;
3348 +       struct iovec piov[3];
3349 +       size_t sent;
3350 +       int ret;
3351 +       u32 param[2], *bp, zero;
3352 +
3353 +       _enter("%p,%s,%p",server,name,vinfo);
3354 +
3355 +       /* get hold of the fileserver connection */
3356 +       ret = afs_server_get_fsconn(server,&conn);
3357 +       if (ret<0)
3358 +               goto out;
3359 +
3360 +       /* create a call through that connection */
3361 +       ret = rxrpc_create_call(conn,NULL,NULL,afs_rxfs_aemap,&call);
3362 +       if (ret<0) {
3363 +               printk("kAFS: Unable to create call: %d\n",ret);
3364 +               goto out_put_conn;
3365 +       }
3366 +       call->app_opcode = FSGETVOLUMEINFO;
3367 +
3368 +       /* we want to get event notifications from the call */
3369 +       add_wait_queue(&call->waitq,&myself);
3370 +
3371 +       /* marshall the parameters */
3372 +       piov[1].iov_len = strlen(name);
3373 +       piov[1].iov_base = (char*)name;
3374 +
3375 +       zero = 0;
3376 +       piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
3377 +       piov[2].iov_base = &zero;
3378 +
3379 +       param[0] = htonl(FSGETVOLUMEINFO);
3380 +       param[1] = htonl(piov[1].iov_len);
3381 +
3382 +       piov[0].iov_len = sizeof(param);
3383 +       piov[0].iov_base = param;
3384 +
3385 +       /* send the parameters to the server */
3386 +       ret = rxrpc_call_write_data(call,3,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3387 +       if (ret<0)
3388 +               goto abort;
3389 +
3390 +       /* wait for the reply to completely arrive */
3391 +       bp = rxrpc_call_alloc_scratch(call,64);
3392 +
3393 +       ret = rxrpc_call_read_data(call,bp,64,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
3394 +       if (ret<0) {
3395 +               if (ret==-ECONNABORTED) {
3396 +                       ret = call->app_errno;
3397 +                       goto out_unwait;
3398 +               }
3399 +               goto abort;
3400 +       }
3401 +
3402 +       /* unmarshall the reply */
3403 +       vinfo->vid = ntohl(*bp++);
3404 +       vinfo->type = ntohl(*bp++);
3405 +
3406 +       vinfo->type_vids[0] = ntohl(*bp++);
3407 +       vinfo->type_vids[1] = ntohl(*bp++);
3408 +       vinfo->type_vids[2] = ntohl(*bp++);
3409 +       vinfo->type_vids[3] = ntohl(*bp++);
3410 +       vinfo->type_vids[4] = ntohl(*bp++);
3411 +
3412 +       vinfo->nservers = ntohl(*bp++);
3413 +       vinfo->servers[0].addr.s_addr = *bp++;
3414 +       vinfo->servers[1].addr.s_addr = *bp++;
3415 +       vinfo->servers[2].addr.s_addr = *bp++;
3416 +       vinfo->servers[3].addr.s_addr = *bp++;
3417 +       vinfo->servers[4].addr.s_addr = *bp++;
3418 +       vinfo->servers[5].addr.s_addr = *bp++;
3419 +       vinfo->servers[6].addr.s_addr = *bp++;
3420 +       vinfo->servers[7].addr.s_addr = *bp++;
3421 +
3422 +       ret = -EBADMSG;
3423 +       if (vinfo->nservers>8)
3424 +               goto abort;
3425 +
3426 +       /* success */
3427 +       ret = 0;
3428 +
3429 + out_unwait:
3430 +       set_current_state(TASK_RUNNING);
3431 +       remove_wait_queue(&call->waitq,&myself);
3432 +       rxrpc_put_call(call);
3433 + out_put_conn:
3434 +       afs_server_release_fsconn(server,conn);
3435 + out:
3436 +       _leave("");
3437 +       return ret;
3438 +
3439 + abort:
3440 +       set_current_state(TASK_UNINTERRUPTIBLE);
3441 +       rxrpc_call_abort(call,ret);
3442 +       schedule();
3443 +       goto out_unwait;
3444 +
3445 +} /* end afs_rxfs_get_volume_info() */
3446 +#endif
3447 +
3448 +/*****************************************************************************/
3449 +/*
3450 + * fetch the status information for a file
3451 + */
3452 +int afs_rxfs_fetch_file_status(afs_server_t *server,
3453 +                              afs_vnode_t *vnode,
3454 +                              afs_volsync_t *volsync)
3455 +{
3456 +       DECLARE_WAITQUEUE(myself,current);
3457 +
3458 +       struct afs_server_callslot callslot;
3459 +       struct rxrpc_call *call;
3460 +       struct iovec piov[1];
3461 +       size_t sent;
3462 +       int ret;
3463 +       u32 *bp;
3464 +
3465 +       _enter("%p,{%u,%u,%u}",server,vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique);
3466 +
3467 +       /* get hold of the fileserver connection */
3468 +       ret = afs_server_request_callslot(server,&callslot);
3469 +       if (ret<0)
3470 +               goto out;
3471 +
3472 +       /* create a call through that connection */
3473 +       ret = rxrpc_create_call(callslot.conn,NULL,NULL,afs_rxfs_aemap,&call);
3474 +       if (ret<0) {
3475 +               printk("kAFS: Unable to create call: %d\n",ret);
3476 +               goto out_put_conn;
3477 +       }
3478 +       call->app_opcode = FSFETCHSTATUS;
3479 +
3480 +       /* we want to get event notifications from the call */
3481 +       add_wait_queue(&call->waitq,&myself);
3482 +
3483 +       /* marshall the parameters */
3484 +       bp = rxrpc_call_alloc_scratch(call,16);
3485 +       bp[0] = htonl(FSFETCHSTATUS);
3486 +       bp[1] = htonl(vnode->fid.vid);
3487 +       bp[2] = htonl(vnode->fid.vnode);
3488 +       bp[3] = htonl(vnode->fid.unique);
3489 +
3490 +       piov[0].iov_len = 16;
3491 +       piov[0].iov_base = bp;
3492 +
3493 +       /* send the parameters to the server */
3494 +       ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3495 +       if (ret<0)
3496 +               goto abort;
3497 +
3498 +       /* wait for the reply to completely arrive */
3499 +       bp = rxrpc_call_alloc_scratch(call,120);
3500 +
3501 +       ret = rxrpc_call_read_data(call,bp,120,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
3502 +       if (ret<0) {
3503 +               if (ret==-ECONNABORTED) {
3504 +                       ret = call->app_errno;
3505 +                       goto out_unwait;
3506 +               }
3507 +               goto abort;
3508 +       }
3509 +
3510 +       /* unmarshall the reply */
3511 +       vnode->status.if_version        = ntohl(*bp++);
3512 +       vnode->status.type              = ntohl(*bp++);
3513 +       vnode->status.nlink             = ntohl(*bp++);
3514 +       vnode->status.size              = ntohl(*bp++);
3515 +       vnode->status.version           = ntohl(*bp++);
3516 +       vnode->status.author            = ntohl(*bp++);
3517 +       vnode->status.owner             = ntohl(*bp++);
3518 +       vnode->status.caller_access     = ntohl(*bp++);
3519 +       vnode->status.anon_access       = ntohl(*bp++);
3520 +       vnode->status.mode              = ntohl(*bp++);
3521 +       vnode->status.parent.vid        = vnode->fid.vid;
3522 +       vnode->status.parent.vnode      = ntohl(*bp++);
3523 +       vnode->status.parent.unique     = ntohl(*bp++);
3524 +       bp++; /* seg size */
3525 +       vnode->status.mtime_client      = ntohl(*bp++);
3526 +       vnode->status.mtime_server      = ntohl(*bp++);
3527 +       bp++; /* group */
3528 +       bp++; /* sync counter */
3529 +       vnode->status.version           |= ((unsigned long long) ntohl(*bp++)) << 32;
3530 +       bp++; /* spare2 */
3531 +       bp++; /* spare3 */
3532 +       bp++; /* spare4 */
3533 +
3534 +       vnode->cb_version               = ntohl(*bp++);
3535 +       vnode->cb_expiry                = ntohl(*bp++);
3536 +       vnode->cb_type                  = ntohl(*bp++);
3537 +
3538 +       if (volsync) {
3539 +               volsync->creation       = ntohl(*bp++);
3540 +               bp++; /* spare2 */
3541 +               bp++; /* spare3 */
3542 +               bp++; /* spare4 */
3543 +               bp++; /* spare5 */
3544 +               bp++; /* spare6 */
3545 +       }
3546 +
3547 +       /* success */
3548 +       ret = 0;
3549 +
3550 + out_unwait:
3551 +       set_current_state(TASK_RUNNING);
3552 +       remove_wait_queue(&call->waitq,&myself);
3553 +       rxrpc_put_call(call);
3554 + out_put_conn:
3555 +       afs_server_release_callslot(server,&callslot);
3556 + out:
3557 +       _leave("");
3558 +       return ret;
3559 +
3560 + abort:
3561 +       set_current_state(TASK_UNINTERRUPTIBLE);
3562 +       rxrpc_call_abort(call,ret);
3563 +       schedule();
3564 +       goto out_unwait;
3565 +} /* end afs_rxfs_fetch_file_status() */
3566 +
3567 +/*****************************************************************************/
3568 +/*
3569 + * fetch the contents of a file or directory
3570 + */
3571 +int afs_rxfs_fetch_file_data(afs_server_t *server,
3572 +                            afs_vnode_t *vnode,
3573 +                            struct afs_rxfs_fetch_descriptor *desc,
3574 +                            afs_volsync_t *volsync)
3575 +{
3576 +       DECLARE_WAITQUEUE(myself,current);
3577 +
3578 +       struct afs_server_callslot callslot;
3579 +       struct rxrpc_call *call;
3580 +       struct iovec piov[1];
3581 +       size_t sent;
3582 +       int ret;
3583 +       u32 *bp;
3584 +
3585 +       _enter("%p,{fid={%u,%u,%u},sz=%u,of=%lu}",
3586 +              server,
3587 +              desc->fid.vid,
3588 +              desc->fid.vnode,
3589 +              desc->fid.unique,
3590 +              desc->size,
3591 +              desc->offset);
3592 +
3593 +       /* get hold of the fileserver connection */
3594 +       ret = afs_server_request_callslot(server,&callslot);
3595 +       if (ret<0)
3596 +               goto out;
3597 +
3598 +       /* create a call through that connection */
3599 +       ret = rxrpc_create_call(callslot.conn,NULL,NULL,afs_rxfs_aemap,&call);
3600 +       if (ret<0) {
3601 +               printk("kAFS: Unable to create call: %d\n",ret);
3602 +               goto out_put_conn;
3603 +       }
3604 +       call->app_opcode = FSFETCHDATA;
3605 +
3606 +       /* we want to get event notifications from the call */
3607 +       add_wait_queue(&call->waitq,&myself);
3608 +
3609 +       /* marshall the parameters */
3610 +       bp = rxrpc_call_alloc_scratch(call,24);
3611 +       bp[0] = htonl(FSFETCHDATA);
3612 +       bp[1] = htonl(desc->fid.vid);
3613 +       bp[2] = htonl(desc->fid.vnode);
3614 +       bp[3] = htonl(desc->fid.unique);
3615 +       bp[4] = htonl(desc->offset);
3616 +       bp[5] = htonl(desc->size);
3617 +
3618 +       piov[0].iov_len = 24;
3619 +       piov[0].iov_base = bp;
3620 +
3621 +       /* send the parameters to the server */
3622 +       ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3623 +       if (ret<0)
3624 +               goto abort;
3625 +
3626 +       /* wait for the data count to arrive */
3627 +       ret = rxrpc_call_read_data(call,bp,4,RXRPC_CALL_READ_BLOCK);
3628 +       if (ret<0)
3629 +               goto read_failed;
3630 +
3631 +       desc->actual = ntohl(bp[0]);
3632 +       if (desc->actual!=desc->size) {
3633 +               ret = -EBADMSG;
3634 +               goto abort;
3635 +       }
3636 +
3637 +       /* call the app to read the actual data */
3638 +       rxrpc_call_reset_scratch(call);
3639 +
3640 +       ret = rxrpc_call_read_data(call,desc->buffer,desc->actual,RXRPC_CALL_READ_BLOCK);
3641 +       if (ret<0)
3642 +               goto read_failed;
3643 +
3644 +       /* wait for the rest of the reply to completely arrive */
3645 +       rxrpc_call_reset_scratch(call);
3646 +       bp = rxrpc_call_alloc_scratch(call,120);
3647 +
3648 +       ret = rxrpc_call_read_data(call,bp,120,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
3649 +       if (ret<0)
3650 +               goto read_failed;
3651 +
3652 +       /* unmarshall the reply */
3653 +       vnode->status.if_version        = ntohl(*bp++);
3654 +       vnode->status.type              = ntohl(*bp++);
3655 +       vnode->status.nlink             = ntohl(*bp++);
3656 +       vnode->status.size              = ntohl(*bp++);
3657 +       vnode->status.version           = ntohl(*bp++);
3658 +       vnode->status.author            = ntohl(*bp++);
3659 +       vnode->status.owner             = ntohl(*bp++);
3660 +       vnode->status.caller_access     = ntohl(*bp++);
3661 +       vnode->status.anon_access       = ntohl(*bp++);
3662 +       vnode->status.mode              = ntohl(*bp++);
3663 +       vnode->status.parent.vid        = desc->fid.vid;
3664 +       vnode->status.parent.vnode      = ntohl(*bp++);
3665 +       vnode->status.parent.unique     = ntohl(*bp++);
3666 +       bp++; /* seg size */
3667 +       vnode->status.mtime_client      = ntohl(*bp++);
3668 +       vnode->status.mtime_server      = ntohl(*bp++);
3669 +       bp++; /* group */
3670 +       bp++; /* sync counter */
3671 +       vnode->status.version           |= ((unsigned long long) ntohl(*bp++)) << 32;
3672 +       bp++; /* spare2 */
3673 +       bp++; /* spare3 */
3674 +       bp++; /* spare4 */
3675 +
3676 +       vnode->cb_version               = ntohl(*bp++);
3677 +       vnode->cb_expiry                = ntohl(*bp++);
3678 +       vnode->cb_type                  = ntohl(*bp++);
3679 +
3680 +       if (volsync) {
3681 +               volsync->creation       = ntohl(*bp++);
3682 +               bp++; /* spare2 */
3683 +               bp++; /* spare3 */
3684 +               bp++; /* spare4 */
3685 +               bp++; /* spare5 */
3686 +               bp++; /* spare6 */
3687 +       }
3688 +
3689 +       /* success */
3690 +       ret = 0;
3691 +
3692 + out_unwait:
3693 +       set_current_state(TASK_RUNNING);
3694 +       remove_wait_queue(&call->waitq,&myself);
3695 +       rxrpc_put_call(call);
3696 + out_put_conn:
3697 +       afs_server_release_callslot(server,&callslot);
3698 + out:
3699 +       _leave(" = %d",ret);
3700 +       return ret;
3701 +
3702 + read_failed:
3703 +       if (ret==-ECONNABORTED) {
3704 +               ret = call->app_errno;
3705 +               goto out_unwait;
3706 +       }
3707 +
3708 + abort:
3709 +       set_current_state(TASK_UNINTERRUPTIBLE);
3710 +       rxrpc_call_abort(call,ret);
3711 +       schedule();
3712 +       goto out_unwait;
3713 +
3714 +} /* end afs_rxfs_fetch_file_data() */
3715 +
3716 +/*****************************************************************************/
3717 +/*
3718 + * ask the AFS fileserver to discard a callback request on a file
3719 + */
3720 +int afs_rxfs_give_up_callback(afs_server_t *server, afs_vnode_t *vnode)
3721 +{
3722 +       DECLARE_WAITQUEUE(myself,current);
3723 +
3724 +       struct afs_server_callslot callslot;
3725 +       struct rxrpc_call *call;
3726 +       struct iovec piov[1];
3727 +       size_t sent;
3728 +       int ret;
3729 +       u32 *bp;
3730 +
3731 +       _enter("%p,{%u,%u,%u}",server,vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique);
3732 +
3733 +       /* get hold of the fileserver connection */
3734 +       ret = afs_server_request_callslot(server,&callslot);
3735 +       if (ret<0)
3736 +               goto out;
3737 +
3738 +       /* create a call through that connection */
3739 +       ret = rxrpc_create_call(callslot.conn,NULL,NULL,afs_rxfs_aemap,&call);
3740 +       if (ret<0) {
3741 +               printk("kAFS: Unable to create call: %d\n",ret);
3742 +               goto out_put_conn;
3743 +       }
3744 +       call->app_opcode = FSGIVEUPCALLBACKS;
3745 +
3746 +       /* we want to get event notifications from the call */
3747 +       add_wait_queue(&call->waitq,&myself);
3748 +
3749 +       /* marshall the parameters */
3750 +       bp = rxrpc_call_alloc_scratch(call,(1+4+4)*4);
3751 +
3752 +       piov[0].iov_len = (1+4+4)*4;
3753 +       piov[0].iov_base = bp;
3754 +
3755 +       *bp++ = htonl(FSGIVEUPCALLBACKS);
3756 +       *bp++ = htonl(1);
3757 +       *bp++ = htonl(vnode->fid.vid);
3758 +       *bp++ = htonl(vnode->fid.vnode);
3759 +       *bp++ = htonl(vnode->fid.unique);
3760 +       *bp++ = htonl(1);
3761 +       *bp++ = htonl(vnode->cb_version);
3762 +       *bp++ = htonl(vnode->cb_expiry);
3763 +       *bp++ = htonl(vnode->cb_type);
3764 +
3765 +       /* send the parameters to the server */
3766 +       ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3767 +       if (ret<0)
3768 +               goto abort;
3769 +
3770 +       /* wait for the reply to completely arrive */
3771 +       for (;;) {
3772 +               set_current_state(TASK_INTERRUPTIBLE);
3773 +               if (call->app_call_state!=RXRPC_CSTATE_CLNT_RCV_REPLY ||
3774 +                   signal_pending(current))
3775 +                       break;
3776 +               schedule();
3777 +       }
3778 +       set_current_state(TASK_RUNNING);
3779 +
3780 +       ret = -EINTR;
3781 +       if (signal_pending(current))
3782 +               goto abort;
3783 +
3784 +       switch (call->app_call_state) {
3785 +       case RXRPC_CSTATE_ERROR:
3786 +               ret = call->app_errno;
3787 +               goto out_unwait;
3788 +
3789 +       case RXRPC_CSTATE_CLNT_GOT_REPLY:
3790 +               ret = 0;
3791 +               goto out_unwait;
3792 +
3793 +       default:
3794 +               BUG();
3795 +       }
3796 +
3797 + out_unwait:
3798 +       set_current_state(TASK_RUNNING);
3799 +       remove_wait_queue(&call->waitq,&myself);
3800 +       rxrpc_put_call(call);
3801 + out_put_conn:
3802 +       afs_server_release_callslot(server,&callslot);
3803 + out:
3804 +       _leave("");
3805 +       return ret;
3806 +
3807 + abort:
3808 +       set_current_state(TASK_UNINTERRUPTIBLE);
3809 +       rxrpc_call_abort(call,ret);
3810 +       schedule();
3811 +       goto out_unwait;
3812 +} /* end afs_rxfs_give_up_callback() */
3813 +
3814 +/*****************************************************************************/
3815 +/*
3816 + * look a filename up in a directory
3817 + * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
3818 + */
3819 +#if 0
3820 +int afs_rxfs_lookup(afs_server_t *server,
3821 +                   afs_vnode_t *dir,
3822 +                   const char *filename,
3823 +                   afs_vnode_t *vnode,
3824 +                   afs_volsync_t *volsync)
3825 +{
3826 +       DECLARE_WAITQUEUE(myself,current);
3827 +
3828 +       struct rxrpc_connection *conn;
3829 +       struct rxrpc_call *call;
3830 +       struct iovec piov[3];
3831 +       size_t sent;
3832 +       int ret;
3833 +       u32 *bp, zero;
3834 +
3835 +       kenter("%p,{%u,%u,%u},%s",server,dir->fid.vid,dir->fid.vnode,dir->fid.unique,filename);
3836 +
3837 +       /* get hold of the fileserver connection */
3838 +       ret = afs_server_get_fsconn(server,&conn);
3839 +       if (ret<0)
3840 +               goto out;
3841 +
3842 +       /* create a call through that connection */
3843 +       ret = rxrpc_create_call(conn,NULL,NULL,afs_rxfs_aemap,&call);
3844 +       if (ret<0) {
3845 +               printk("kAFS: Unable to create call: %d\n",ret);
3846 +               goto out_put_conn;
3847 +       }
3848 +       call->app_opcode = FSLOOKUP;
3849 +
3850 +       /* we want to get event notifications from the call */
3851 +       add_wait_queue(&call->waitq,&myself);
3852 +
3853 +       /* marshall the parameters */
3854 +       bp = rxrpc_call_alloc_scratch(call,20);
3855 +
3856 +       zero = 0;
3857 +
3858 +       piov[0].iov_len = 20;
3859 +       piov[0].iov_base = bp;
3860 +       piov[1].iov_len = strlen(filename);
3861 +       piov[1].iov_base = (char*) filename;
3862 +       piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
3863 +       piov[2].iov_base = &zero;
3864 +
3865 +       *bp++ = htonl(FSLOOKUP);
3866 +       *bp++ = htonl(dir->fid.vid);
3867 +       *bp++ = htonl(dir->fid.vnode);
3868 +       *bp++ = htonl(dir->fid.unique);
3869 +       *bp++ = htonl(piov[1].iov_len);
3870 +
3871 +       /* send the parameters to the server */
3872 +       ret = rxrpc_call_write_data(call,3,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3873 +       if (ret<0)
3874 +               goto abort;
3875 +
3876 +       /* wait for the reply to completely arrive */
3877 +       bp = rxrpc_call_alloc_scratch(call,220);
3878 +
3879 +       ret = rxrpc_call_read_data(call,bp,220,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
3880 +       if (ret<0) {
3881 +               if (ret==-ECONNABORTED) {
3882 +                       ret = call->app_errno;
3883 +                       goto out_unwait;
3884 +               }
3885 +               goto abort;
3886 +       }
3887 +
3888 +       /* unmarshall the reply */
3889 +       vnode->fid.vid                  = ntohl(*bp++);
3890 +       vnode->fid.vnode                = ntohl(*bp++);
3891 +       vnode->fid.unique               = ntohl(*bp++);
3892 +
3893 +       vnode->status.if_version        = ntohl(*bp++);
3894 +       vnode->status.type              = ntohl(*bp++);
3895 +       vnode->status.nlink             = ntohl(*bp++);
3896 +       vnode->status.size              = ntohl(*bp++);
3897 +       vnode->status.version           = ntohl(*bp++);
3898 +       vnode->status.author            = ntohl(*bp++);
3899 +       vnode->status.owner             = ntohl(*bp++);
3900 +       vnode->status.caller_access     = ntohl(*bp++);
3901 +       vnode->status.anon_access       = ntohl(*bp++);
3902 +       vnode->status.mode              = ntohl(*bp++);
3903 +       vnode->status.parent.vid        = dir->fid.vid;
3904 +       vnode->status.parent.vnode      = ntohl(*bp++);
3905 +       vnode->status.parent.unique     = ntohl(*bp++);
3906 +       bp++; /* seg size */
3907 +       vnode->status.mtime_client      = ntohl(*bp++);
3908 +       vnode->status.mtime_server      = ntohl(*bp++);
3909 +       bp++; /* group */
3910 +       bp++; /* sync counter */
3911 +       vnode->status.version           |= ((unsigned long long) ntohl(*bp++)) << 32;
3912 +       bp++; /* spare2 */
3913 +       bp++; /* spare3 */
3914 +       bp++; /* spare4 */
3915 +
3916 +       dir->status.if_version          = ntohl(*bp++);
3917 +       dir->status.type                = ntohl(*bp++);
3918 +       dir->status.nlink               = ntohl(*bp++);
3919 +       dir->status.size                = ntohl(*bp++);
3920 +       dir->status.version             = ntohl(*bp++);
3921 +       dir->status.author              = ntohl(*bp++);
3922 +       dir->status.owner               = ntohl(*bp++);
3923 +       dir->status.caller_access       = ntohl(*bp++);
3924 +       dir->status.anon_access         = ntohl(*bp++);
3925 +       dir->status.mode                = ntohl(*bp++);
3926 +       dir->status.parent.vid          = dir->fid.vid;
3927 +       dir->status.parent.vnode        = ntohl(*bp++);
3928 +       dir->status.parent.unique       = ntohl(*bp++);
3929 +       bp++; /* seg size */
3930 +       dir->status.mtime_client        = ntohl(*bp++);
3931 +       dir->status.mtime_server        = ntohl(*bp++);
3932 +       bp++; /* group */
3933 +       bp++; /* sync counter */
3934 +       dir->status.version             |= ((unsigned long long) ntohl(*bp++)) << 32;
3935 +       bp++; /* spare2 */
3936 +       bp++; /* spare3 */
3937 +       bp++; /* spare4 */
3938 +
3939 +       /* the callback applies to the vnode just looked up */
3940 +       vnode->cb_version               = ntohl(*bp++);
3941 +       vnode->cb_expiry                = ntohl(*bp++);
3942 +       vnode->cb_type                  = ntohl(*bp++);
3943 +
3944 +       if (volsync) {
3945 +               volsync->creation       = ntohl(*bp++);
3946 +               bp++; /* spare2 */
3947 +               bp++; /* spare3 */
3948 +               bp++; /* spare4 */
3949 +               bp++; /* spare5 */
3950 +               bp++; /* spare6 */
3951 +       }
3952 +
3953 +       /* success */
3954 +       ret = 0;
3955 +
3956 + out_unwait:
3957 +       set_current_state(TASK_RUNNING);
3958 +       remove_wait_queue(&call->waitq,&myself);
3959 +       rxrpc_put_call(call);
3960 + out_put_conn:
3961 +       afs_server_release_fsconn(server,conn);
3962 + out:
3963 +       kleave("");
3964 +       return ret;
3965 +
3966 + abort:
3967 +       set_current_state(TASK_UNINTERRUPTIBLE);
3968 +       rxrpc_call_abort(call,ret);
3969 +       schedule();
3970 +       goto out_unwait;
3971 +} /* end afs_rxfs_lookup() */
3972 +#endif
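All of the stubs in this file marshal their requests the same way: a scratch buffer of 32-bit words in network byte order, opcode first, then the FID, then any per-call arguments, mirrored by word-at-a-time unmarshalling of the reply. Shown as a struct purely for description (the code builds the words directly in the scratch buffer; this type does not exist in the patch), the FSFETCHDATA request from afs_rxfs_fetch_file_data() is laid out as:

/* descriptive layout of the 24-byte FSFETCHDATA request; every field is htonl()'d */
struct afs_fsfetchdata_request {
	u32	opcode;		/* FSFETCHDATA (130) */
	u32	fid_vid;	/* volume ID */
	u32	fid_vnode;	/* vnode number within that volume */
	u32	fid_unique;	/* uniquifier guarding against vnode reuse */
	u32	offset;		/* byte offset to start the fetch at */
	u32	size;		/* number of bytes requested */
};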
3973 diff -urNp linux-5240/fs/afs/fsclient.h linux-5250/fs/afs/fsclient.h
3974 --- linux-5240/fs/afs/fsclient.h        1970-01-01 01:00:00.000000000 +0100
3975 +++ linux-5250/fs/afs/fsclient.h        
3976 @@ -0,0 +1,53 @@
3977 +/* fsclient.h: AFS File Server client stub declarations
3978 + *
3979 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
3980 + * Written by David Howells (dhowells@redhat.com)
3981 + *
3982 + * This program is free software; you can redistribute it and/or
3983 + * modify it under the terms of the GNU General Public License
3984 + * as published by the Free Software Foundation; either version
3985 + * 2 of the License, or (at your option) any later version.
3986 + */
3987 +
3988 +#ifndef _LINUX_AFS_FSCLIENT_H
3989 +#define _LINUX_AFS_FSCLIENT_H
3990 +
3991 +#include "server.h"
3992 +
3993 +extern int afs_rxfs_get_volume_info(afs_server_t *server,
3994 +                                   const char *name,
3995 +                                   afs_volume_info_t *vinfo);
3996 +
3997 +extern int afs_rxfs_fetch_file_status(afs_server_t *server,
3998 +                                     afs_vnode_t *vnode,
3999 +                                     afs_volsync_t *volsync);
4000 +
4001 +struct afs_rxfs_fetch_descriptor {
4002 +       afs_fid_t       fid;            /* file ID to fetch */
4003 +       size_t          size;           /* total number of bytes to fetch */
4004 +       off_t           offset;         /* offset in file to start from */
4005 +       void            *buffer;        /* read buffer */
4006 +       size_t          actual;         /* actual size sent back by server */
4007 +};
4008 +
4009 +extern int afs_rxfs_fetch_file_data(afs_server_t *server,
4010 +                                   afs_vnode_t *vnode,
4011 +                                   struct afs_rxfs_fetch_descriptor *desc,
4012 +                                   afs_volsync_t *volsync);
4013 +
4014 +extern int afs_rxfs_give_up_callback(afs_server_t *server, afs_vnode_t *vnode);
4015 +
4016 +/* this doesn't appear to work in OpenAFS server */
4017 +extern int afs_rxfs_lookup(afs_server_t *server,
4018 +                          afs_vnode_t *dir,
4019 +                          const char *filename,
4020 +                          afs_vnode_t *vnode,
4021 +                          afs_volsync_t *volsync);
4022 +
4023 +/* this is apparently mis-implemented in OpenAFS server */
4024 +extern int afs_rxfs_get_root_volume(afs_server_t *server,
4025 +                                   char *buf,
4026 +                                   size_t *buflen);
4027 +
4028 +
4029 +#endif /* _LINUX_AFS_FSCLIENT_H */
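The descriptor is the whole calling convention for data fetches: the caller says what it wants (fid, offset, size, destination buffer) and the stub reports how much actually came back in desc.actual. A hedged usage sketch mirroring what afs_file_readpage() does; the helper name is hypothetical and error handling is trimmed:

/* illustrative caller of the fetch interface (not part of the patch) */
static int afs_example_fetch_page(afs_server_t *server, afs_vnode_t *vnode, void *buffer)
{
	struct afs_rxfs_fetch_descriptor desc;
	int ret;

	desc.fid    = vnode->fid;	/* which file to read */
	desc.offset = 0;		/* start at the beginning of the file */
	desc.size   = PAGE_SIZE;	/* ask for one page's worth */
	desc.buffer = buffer;		/* caller-supplied destination */

	ret = afs_rxfs_fetch_file_data(server, vnode, &desc, NULL);

	/* on success, desc.actual holds the byte count the server sent back */
	return ret;
}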
4030 diff -urNp linux-5240/fs/afs/inode.c linux-5250/fs/afs/inode.c
4031 --- linux-5240/fs/afs/inode.c   1970-01-01 01:00:00.000000000 +0100
4032 +++ linux-5250/fs/afs/inode.c   
4033 @@ -0,0 +1,412 @@
4034 +/*
4035 + * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
4036 + *
4037 + * This software may be freely redistributed under the terms of the
4038 + * GNU General Public License.
4039 + *
4040 + * You should have received a copy of the GNU General Public License
4041 + * along with this program; if not, write to the Free Software
4042 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
4043 + *
4044 + * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
4045 + *          David Howells <dhowells@redhat.com>
4046 + *
4047 + */
4048 +
4049 +#include <linux/kernel.h>
4050 +#include <linux/module.h>
4051 +#include <linux/init.h>
4052 +#include <linux/sched.h>
4053 +#include <linux/slab.h>
4054 +#include <linux/fs.h>
4055 +#include <linux/pagemap.h>
4056 +#include "volume.h"
4057 +#include "vnode.h"
4058 +#include "cache.h"
4059 +#include "super.h"
4060 +#include "internal.h"
4061 +
4062 +struct afs_iget_data {
4063 +       afs_fid_t               fid;
4064 +       afs_volume_t            *volume;        /* volume on which the vnode resides */
4065 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
4066 +       afs_vnode_t             *new_vnode;     /* new vnode record */
4067 +#endif
4068 +};
4069 +
4070 +/*****************************************************************************/
4071 +/*
4072 + * map the AFS file status to the inode member variables
4073 + */
4074 +static int afs_inode_map_status(afs_vnode_t *vnode)
4075 +{
4076 +       struct inode *inode = AFS_VNODE_TO_I(vnode);
4077 +
4078 +       _debug("FS: ft=%d lk=%d sz=%u ver=%Lu mod=%hu",
4079 +              vnode->status.type,
4080 +              vnode->status.nlink,
4081 +              vnode->status.size,
4082 +              vnode->status.version,
4083 +              vnode->status.mode);
4084 +
4085 +       switch (vnode->status.type) {
4086 +       case AFS_FTYPE_FILE:
4087 +               inode->i_mode   = S_IFREG | vnode->status.mode;
4088 +               inode->i_op     = &afs_file_inode_operations;
4089 +               inode->i_fop    = &afs_file_file_operations;
4090 +               break;
4091 +       case AFS_FTYPE_DIR:
4092 +               inode->i_mode   = S_IFDIR | vnode->status.mode;
4093 +               inode->i_op     = &afs_dir_inode_operations;
4094 +               inode->i_fop    = &afs_dir_file_operations;
4095 +               break;
4096 +       case AFS_FTYPE_SYMLINK:
4097 +               inode->i_mode   = S_IFLNK | vnode->status.mode;
4098 +               inode->i_op     = &page_symlink_inode_operations;
4099 +               break;
4100 +       default:
4101 +               printk("kAFS: AFS vnode with undefined type\n");
4102 +               return -EBADMSG;
4103 +       }
4104 +
4105 +       inode->i_nlink          = vnode->status.nlink;
4106 +       inode->i_uid            = vnode->status.owner;
4107 +       inode->i_gid            = 0;
4108 +       inode->i_rdev           = NODEV;
4109 +       inode->i_size           = vnode->status.size;
4110 +       inode->i_atime          = inode->i_mtime = inode->i_ctime = vnode->status.mtime_server;
4111 +       inode->i_blksize        = PAGE_CACHE_SIZE;
4112 +       inode->i_blocks         = 0;
4113 +       inode->i_version        = vnode->fid.unique;
4114 +       inode->i_mapping->a_ops = &afs_fs_aops;
4115 +
4116 +       /* check to see whether a symbolic link is really a mountpoint */
4117 +       if (vnode->status.type==AFS_FTYPE_SYMLINK) {
4118 +               afs_mntpt_check_symlink(vnode);
4119 +
4120 +               if (vnode->flags & AFS_VNODE_MOUNTPOINT) {
4121 +                       inode->i_mode   = S_IFDIR | vnode->status.mode;
4122 +                       inode->i_op     = &afs_mntpt_inode_operations;
4123 +                       inode->i_fop    = &afs_mntpt_file_operations;
4124 +               }
4125 +       }
4126 +
4127 +       return 0;
4128 +} /* end afs_inode_map_status() */
4129 +
4130 +/*****************************************************************************/
4131 +/*
4132 + * attempt to fetch the status of an inode, coalescing multiple simultaneous fetches
4133 + */
4134 +int afs_inode_fetch_status(struct inode *inode)
4135 +{
4136 +       afs_vnode_t *vnode;
4137 +       int ret;
4138 +
4139 +       vnode = AFS_FS_I(inode);
4140 +
4141 +       ret = afs_vnode_fetch_status(vnode);
4142 +
4143 +       if (ret==0)
4144 +               ret = afs_inode_map_status(vnode);
4145 +
4146 +       return ret;
4147 +
4148 +} /* end afs_inode_fetch_status() */
4149 +
4150 +/*****************************************************************************/
4151 +/*
4152 + * iget5() comparator
4153 + */
4154 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
4155 +static int afs_iget5_test(struct inode *inode, void *opaque)
4156 +{
4157 +       struct afs_iget_data *data = opaque;
4158 +
4159 +       /* only match inodes with the same version number */
4160 +       return inode->i_ino==data->fid.vnode && inode->i_version==data->fid.unique;
4161 +} /* end afs_iget5_test() */
4162 +#endif
4163 +
4164 +/*****************************************************************************/
4165 +/*
4166 + * iget5() inode initialiser
4167 + */
4168 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
4169 +static int afs_iget5_set(struct inode *inode, void *opaque)
4170 +{
4171 +       struct afs_iget_data *data = opaque;
4172 +       afs_vnode_t *vnode = AFS_FS_I(inode);
4173 +
4174 +       inode->i_ino = data->fid.vnode;
4175 +       inode->i_version = data->fid.unique;
4176 +       vnode->fid = data->fid;
4177 +       vnode->volume = data->volume;
4178 +
4179 +       return 0;
4180 +} /* end afs_iget5_set() */
4181 +#endif
4182 +
4183 +/*****************************************************************************/
4184 +/*
4185 + * iget4() comparator
4186 + */
4187 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
4188 +static int afs_iget4_test(struct inode *inode, ino_t ino, void *opaque)
4189 +{
4190 +       struct afs_iget_data *data = opaque;
4191 +
4192 +       /* only match inodes with the same version number */
4193 +       return inode->i_ino==data->fid.vnode && inode->i_version==data->fid.unique;
4194 +} /* end afs_iget4_test() */
4195 +#endif
4196 +
4197 +/*****************************************************************************/
4198 +/*
4199 + * read an inode (2.4 only)
4200 + */
4201 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
4202 +void afs_read_inode2(struct inode *inode, void *opaque)
4203 +{
4204 +       struct afs_iget_data *data = opaque;
4205 +       afs_vnode_t *vnode;
4206 +       int ret;
4207 +
4208 +       kenter(",{{%u,%u,%u},%p}",data->fid.vid,data->fid.vnode,data->fid.unique,data->volume);
4209 +
4210 +       if (inode->u.generic_ip) BUG();
4211 +
4212 +       /* attach a pre-allocated vnode record */
4213 +       inode->u.generic_ip = vnode = data->new_vnode;
4214 +       data->new_vnode = NULL;
4215 +
4216 +       memset(vnode,0,sizeof(*vnode));
4217 +       vnode->inode = inode;
4218 +       init_waitqueue_head(&vnode->update_waitq);
4219 +       spin_lock_init(&vnode->lock);
4220 +       INIT_LIST_HEAD(&vnode->cb_link);
4221 +       INIT_LIST_HEAD(&vnode->cb_hash_link);
4222 +       afs_timer_init(&vnode->cb_timeout,&afs_vnode_cb_timed_out_ops);
4223 +       vnode->flags |= AFS_VNODE_CHANGED;
4224 +       vnode->volume = data->volume;
4225 +       vnode->fid = data->fid;
4226 +
4227 +       /* ask the server for a status check */
4228 +       ret = afs_vnode_fetch_status(vnode);
4229 +       if (ret<0) {
4230 +               make_bad_inode(inode);
4231 +               kleave(" [bad inode]");
4232 +               return;
4233 +       }
4234 +
4235 +       ret = afs_inode_map_status(vnode);
4236 +       if (ret<0) {
4237 +               make_bad_inode(inode);
4238 +               kleave(" [bad inode]");
4239 +               return;
4240 +       }
4241 +
4242 +       kleave("");
4243 +       return;
4244 +} /* end afs_read_inode2() */
4245 +#endif
4246 +
4247 +/*****************************************************************************/
4248 +/*
4249 + * inode retrieval
4250 + */
4251 +inline int afs_iget(struct super_block *sb, afs_fid_t *fid, struct inode **_inode)
4252 +{
4253 +       struct afs_iget_data data = { fid: *fid };
4254 +       struct afs_super_info *as;
4255 +       struct inode *inode;
4256 +       afs_vnode_t *vnode;
4257 +       int ret;
4258 +
4259 +       kenter(",{%u,%u,%u},,",fid->vid,fid->vnode,fid->unique);
4260 +
4261 +       as = sb->u.generic_sbp;
4262 +       data.volume = as->volume;
4263 +
4264 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
4265 +       inode = iget5_locked(sb,fid->vnode,afs_iget5_test,afs_iget5_set,&data);
4266 +       if (!inode) {
4267 +               _leave(" = -ENOMEM");
4268 +               return -ENOMEM;
4269 +       }
4270 +
4271 +       vnode = AFS_FS_I(inode);
4272 +
4273 +       /* deal with an existing inode */
4274 +       if (!(inode->i_state & I_NEW)) {
4275 +               ret = afs_vnode_fetch_status(vnode);
4276 +               if (ret==0)
4277 +                       *_inode = inode;
4278 +               else
4279 +                       iput(inode);
4280 +               _leave(" = %d",ret);
4281 +               return ret;
4282 +       }
4283 +
4284 +       /* okay... it's a new inode */
4285 +       vnode->flags |= AFS_VNODE_CHANGED;
4286 +       ret = afs_inode_fetch_status(inode);
4287 +       if (ret<0)
4288 +               goto bad_inode;
4289 +
4290 +#if 0
4291 +       /* find a cache entry for it */
4292 +       ret = afs_cache_lookup_vnode(as->volume,vnode);
4293 +       if (ret<0)
4294 +               goto bad_inode;
4295 +#endif
4296 +
4297 +       /* success */
4298 +       unlock_new_inode(inode);
4299 +
4300 +       *_inode = inode;
4301 +       _leave(" = 0 [CB { v=%u x=%lu t=%u nix=%u }]",
4302 +              vnode->cb_version,
4303 +              vnode->cb_timeout.timo_jif,
4304 +              vnode->cb_type,
4305 +              vnode->nix
4306 +              );
4307 +       return 0;
4308 +
4309 +       /* failure */
4310 + bad_inode:
4311 +       make_bad_inode(inode);
4312 +       unlock_new_inode(inode);
4313 +       iput(inode);
4314 +
4315 +       _leave(" = %d [bad]",ret);
4316 +       return ret;
4317 +
4318 +#else
4319 +
4320 +       /* pre-allocate a vnode record so that afs_read_inode2() doesn't have to return an inode
4321 +        * without one attached
4322 +        */
4323 +       data.new_vnode = kmalloc(sizeof(afs_vnode_t),GFP_KERNEL);
4324 +       if (!data.new_vnode) {
4325 +               kleave(" = -ENOMEM");
4326 +               return -ENOMEM;
4327 +       }
4328 +
4329 +       inode = iget4(sb,fid->vnode,afs_iget4_test,&data);
4330 +       if (data.new_vnode) kfree(data.new_vnode); 
4331 +       if (!inode) {
4332 +               kleave(" = -ENOMEM");
4333 +               return -ENOMEM;
4334 +       }
4335 +
4336 +       vnode = AFS_FS_I(inode);
4337 +       *_inode = inode;
4338 +       kleave(" = 0 [CB { v=%u x=%lu t=%u nix=%u }]",
4339 +              vnode->cb_version,
4340 +              vnode->cb_timeout.timo_jif,
4341 +              vnode->cb_type,
4342 +              vnode->nix
4343 +              );
4344 +       return 0;
4345 +#endif
4346 +} /* end afs_iget() */
4347 +
4348 +/*****************************************************************************/
4349 +/*
4350 + * read the attributes of an inode
4351 + */
4352 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
4353 +int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
4354 +{
4355 +       struct inode *inode;
4356 +       afs_vnode_t *vnode;
4357 +       int ret;
4358 +
4359 +       inode = dentry->d_inode;
4360 +
4361 +       _enter("{ ino=%lu v=%lu }",inode->i_ino,inode->i_version);
4362 +
4363 +       vnode = AFS_FS_I(inode);
4364 +
4365 +       ret = afs_inode_fetch_status(inode);
4366 +       if (ret==-ENOENT) {
4367 +               _leave(" = %d [%d %p]",ret,atomic_read(&dentry->d_count),dentry->d_inode);
4368 +               return ret;
4369 +       }
4370 +       else if (ret<0) {
4371 +               make_bad_inode(inode);
4372 +               _leave(" = %d",ret);
4373 +               return ret;
4374 +       }
4375 +
4376 +       /* transfer attributes from the inode structure to the stat structure */
4377 +       generic_fillattr(inode,stat);
4378 +
4379 +       _leave(" = 0 CB { v=%u x=%u t=%u }",
4380 +              vnode->callback.version,
4381 +              vnode->callback.expiry,
4382 +              vnode->callback.type);
4383 +
4384 +       return 0;
4385 +} /* end afs_inode_getattr() */
4386 +#endif
4387 +
4388 +/*****************************************************************************/
4389 +/*
4390 + * revalidate the inode
4391 + */
4392 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
4393 +int afs_inode_revalidate(struct dentry *dentry)
4394 +{
4395 +       struct inode *inode;
4396 +       afs_vnode_t *vnode;
4397 +       int ret;
4398 +
4399 +       inode = dentry->d_inode;
4400 +
4401 +       _enter("{ ino=%lu v=%lu }",inode->i_ino,inode->i_version);
4402 +
4403 +       vnode = AFS_FS_I(inode);
4404 +
4405 +       ret = afs_inode_fetch_status(inode);
4406 +       if (ret==-ENOENT) {
4407 +               _leave(" = %d [%d %p]",ret,atomic_read(&dentry->d_count),dentry->d_inode);
4408 +               return ret;
4409 +       }
4410 +       else if (ret<0) {
4411 +               make_bad_inode(inode);
4412 +               _leave(" = %d",ret);
4413 +               return ret;
4414 +       }
4415 +
4416 +       _leave(" = 0 CB { v=%u x=%u t=%u }",
4417 +              vnode->cb_version,
4418 +              vnode->cb_expiry,
4419 +              vnode->cb_type);
4420 +
4421 +       return 0;
4422 +} /* end afs_inode_revalidate() */
4423 +#endif
4424 +
4425 +/*****************************************************************************/
4426 +/*
4427 + * clear an AFS inode
4428 + */
4429 +void afs_clear_inode(struct inode *inode)
4430 +{
4431 +       afs_vnode_t *vnode;
4432 +
4433 +       vnode = AFS_FS_I(inode);
4434 +
4435 +       _enter("(ino=%lu { v=%u x=%u t=%u })",
4436 +              inode->i_ino,
4437 +              vnode->cb_version,
4438 +              vnode->cb_expiry,
4439 +              vnode->cb_type
4440 +              );
4441 +
4442 +       afs_vnode_give_up_callback(vnode);
4443 +
4444 +       _leave("");
4445 +} /* end afs_clear_inode() */
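afs_iget() is the bridge from an AFS file identifier to a Linux inode: the directory code resolves a name to an afs_fid_t and then asks this routine for the corresponding (possibly already cached) inode. A minimal sketch of such a caller in the 2.4 lookup style; the helper is hypothetical and shown only to illustrate the contract:

/* hypothetical lookup tail: turn a resolved fid into an inode bound to the dentry */
static struct dentry *afs_example_lookup_tail(struct super_block *sb, afs_fid_t *fid,
					      struct dentry *dentry)
{
	struct inode *inode;
	int ret;

	ret = afs_iget(sb, fid, &inode);	/* fetches status and maps it onto the inode */
	if (ret < 0)
		return ERR_PTR(ret);

	d_add(dentry, inode);			/* bind the dentry to the inode */
	return NULL;				/* 2.4 lookup convention: NULL on success */
}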
4446 diff -urNp linux-5240/fs/afs/internal.h linux-5250/fs/afs/internal.h
4447 --- linux-5240/fs/afs/internal.h        1970-01-01 01:00:00.000000000 +0100
4448 +++ linux-5250/fs/afs/internal.h        
4449 @@ -0,0 +1,115 @@
4450 +/* internal.h: internal AFS stuff
4451 + *
4452 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4453 + * Written by David Howells (dhowells@redhat.com)
4454 + *
4455 + * This program is free software; you can redistribute it and/or
4456 + * modify it under the terms of the GNU General Public License
4457 + * as published by the Free Software Foundation; either version
4458 + * 2 of the License, or (at your option) any later version.
4459 + */
4460 +
4461 +#ifndef AFS_INTERNAL_H
4462 +#define AFS_INTERNAL_H
4463 +
4464 +#include <linux/version.h>
4465 +#include <linux/compiler.h>
4466 +#include <linux/kernel.h>
4467 +#include <linux/fs.h>
4468 +
4469 +/*
4470 + * debug tracing
4471 + */
4472 +#define kenter(FMT,...)        printk("==> %s("FMT")\n",__FUNCTION__,##__VA_ARGS__)
4473 +#define kleave(FMT,...)        printk("<== %s()"FMT"\n",__FUNCTION__,##__VA_ARGS__)
4474 +#define kdebug(FMT,...)        printk(FMT"\n",##__VA_ARGS__)
4475 +#define kproto(FMT,...)        printk("### "FMT"\n",##__VA_ARGS__)
4476 +#define knet(FMT,...)  printk(FMT"\n",##__VA_ARGS__)
4477 +
4478 +#if 0
4479 +#define _enter(FMT,...)        kenter(FMT,##__VA_ARGS__)
4480 +#define _leave(FMT,...)        kleave(FMT,##__VA_ARGS__)
4481 +#define _debug(FMT,...)        kdebug(FMT,##__VA_ARGS__)
4482 +#define _proto(FMT,...)        kproto(FMT,##__VA_ARGS__)
4483 +#define _net(FMT,...)  knet(FMT,##__VA_ARGS__)
4484 +#else
4485 +#define _enter(FMT,...)        do { } while(0)
4486 +#define _leave(FMT,...)        do { } while(0)
4487 +#define _debug(FMT,...)        do { } while(0)
4488 +#define _proto(FMT,...)        do { } while(0)
4489 +#define _net(FMT,...)  do { } while(0)
4490 +#endif
4491 +
4492 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
4493 +#define wait_on_page_locked wait_on_page
4494 +#define PageUptodate Page_Uptodate
4495 +
4496 +static inline struct proc_dir_entry *PDE(const struct inode *inode)
4497 +{
4498 +       return (struct proc_dir_entry *)inode->u.generic_ip;
4499 +}
4500 +#endif
4501 +
4502 +/*
4503 + * cell.c
4504 + */
4505 +extern struct rw_semaphore afs_proc_cells_sem;
4506 +extern struct list_head afs_proc_cells;
4507 +
4508 +/*
4509 + * dir.c
4510 + */
4511 +extern struct inode_operations afs_dir_inode_operations;
4512 +extern struct file_operations afs_dir_file_operations;
4513 +
4514 +/*
4515 + * file.c
4516 + */
4517 +extern struct address_space_operations afs_fs_aops;
4518 +extern struct inode_operations afs_file_inode_operations;
4519 +extern struct file_operations afs_file_file_operations;
4520 +
4521 +/*
4522 + * inode.c
4523 + */
4524 +extern int afs_iget(struct super_block *sb, afs_fid_t *fid, struct inode **_inode);
4525 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
4526 +extern int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
4527 +#else
4528 +extern void afs_read_inode2(struct inode *inode, void *opaque);
4529 +extern int afs_inode_revalidate(struct dentry *dentry);
4530 +#endif
4531 +extern void afs_clear_inode(struct inode *inode);
4532 +
4533 +/*
4534 + * mntpt.c
4535 + */
4536 +extern struct inode_operations afs_mntpt_inode_operations;
4537 +extern struct file_operations afs_mntpt_file_operations;
4538 +
4539 +extern int afs_mntpt_check_symlink(afs_vnode_t *vnode);
4540 +
4541 +/*
4542 + * super.c
4543 + */
4544 +extern int afs_fs_init(void);
4545 +extern void afs_fs_exit(void);
4546 +
4547 +#define AFS_CB_HASH_COUNT (PAGE_SIZE/sizeof(struct list_head))
4548 +
4549 +extern struct list_head afs_cb_hash_tbl[];
4550 +extern spinlock_t afs_cb_hash_lock;
4551 +
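+/* select the callback hash bucket used for a particular {server,fid} pair */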
4552 +#define afs_cb_hash(SRV,FID) \
4553 +       afs_cb_hash_tbl[((unsigned)(SRV) + (FID)->vid + (FID)->vnode + (FID)->unique) % \
4554 +                       AFS_CB_HASH_COUNT]
4555 +
4556 +/*
4557 + * proc.c
4558 + */
4559 +extern int afs_proc_init(void);
4560 +extern void afs_proc_cleanup(void);
4561 +extern int afs_proc_cell_setup(afs_cell_t *cell);
4562 +extern void afs_proc_cell_remove(afs_cell_t *cell);
4563 +
4564 +#endif /* AFS_INTERNAL_H */
4565 diff -urNp linux-5240/fs/afs/kafsasyncd.c linux-5250/fs/afs/kafsasyncd.c
4566 --- linux-5240/fs/afs/kafsasyncd.c      1970-01-01 01:00:00.000000000 +0100
4567 +++ linux-5250/fs/afs/kafsasyncd.c      
4568 @@ -0,0 +1,266 @@
4569 +/* kafsasyncd.c: AFS asynchronous operation daemon
4570 + *
4571 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4572 + * Written by David Howells (dhowells@redhat.com)
4573 + *
4574 + * This program is free software; you can redistribute it and/or
4575 + * modify it under the terms of the GNU General Public License
4576 + * as published by the Free Software Foundation; either version
4577 + * 2 of the License, or (at your option) any later version.
4578 + *
4579 + *
4580 + * The AFS async daemon is used to do the following:
4581 + * - probe "dead" servers to see whether they've come back to life yet.
4582 + * - probe "live" servers that we haven't talked to for a while to see if they are better
4583 + *   candidates for serving than the ones we're currently using
4584 + * - poll volume location servers to keep the volume location lists up to date
4585 + */
4586 +
4587 +#include <linux/version.h>
4588 +#include <linux/module.h>
4589 +#include <linux/init.h>
4590 +#include <linux/sched.h>
4591 +#include <linux/completion.h>
4592 +#include "cell.h"
4593 +#include "server.h"
4594 +#include "volume.h"
4595 +#include "kafsasyncd.h"
4596 +#include "kafstimod.h"
4597 +#include <rxrpc/call.h>
4598 +#include <asm/errno.h>
4599 +#include "internal.h"
4600 +
4601 +static DECLARE_COMPLETION(kafsasyncd_alive);
4602 +static DECLARE_COMPLETION(kafsasyncd_dead);
4603 +static DECLARE_WAIT_QUEUE_HEAD(kafsasyncd_sleepq);
4604 +static struct task_struct *kafsasyncd_task;
4605 +static int kafsasyncd_die;
4606 +
4607 +static int kafsasyncd(void *arg);
4608 +
4609 +static LIST_HEAD(kafsasyncd_async_attnq);
4610 +static LIST_HEAD(kafsasyncd_async_busyq);
4611 +static spinlock_t kafsasyncd_async_lock = SPIN_LOCK_UNLOCKED;
4612 +
4613 +static void kafsasyncd_null_call_attn_func(struct rxrpc_call *call)
4614 +{
4615 +}
4616 +
4617 +static void kafsasyncd_null_call_error_func(struct rxrpc_call *call)
4618 +{
4619 +}
4620 +
4621 +/*****************************************************************************/
4622 +/*
4623 + * start the async daemon
4624 + */
4625 +int afs_kafsasyncd_start(void)
4626 +{
4627 +       int ret;
4628 +
4629 +       ret = kernel_thread(kafsasyncd,NULL,0);
4630 +       if (ret<0)
4631 +               return ret;
4632 +
4633 +       wait_for_completion(&kafsasyncd_alive);
4634 +
4635 +       return ret;
4636 +} /* end afs_kafsasyncd_start() */
4637 +
4638 +/*****************************************************************************/
4639 +/*
4640 + * stop the async daemon
4641 + */
4642 +void afs_kafsasyncd_stop(void)
4643 +{
4644 +       /* get rid of my daemon */
4645 +       kafsasyncd_die = 1;
4646 +       wake_up(&kafsasyncd_sleepq);
4647 +       wait_for_completion(&kafsasyncd_dead);
4648 +
4649 +} /* end afs_kafsasyncd_stop() */
4650 +
4651 +/*****************************************************************************/
4652 +/*
4653 + * probing daemon
4654 + */
4655 +static int kafsasyncd(void *arg)
4656 +{
4657 +       DECLARE_WAITQUEUE(myself,current);
4658 +
4659 +       struct list_head *_p;
4660 +       int die;
4661 +
4662 +       kafsasyncd_task = current;
4663 +
4664 +       printk("kAFS: Started kafsasyncd %d\n",current->pid);
4665 +       strcpy(current->comm,"kafsasyncd");
4666 +
4667 +       daemonize();
4668 +
4669 +       complete(&kafsasyncd_alive);
4670 +
4671 +       /* only certain signals are of interest */
4672 +       spin_lock_irq(&current->sigmask_lock);
4673 +       siginitsetinv(&current->blocked,0);
4674 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
4675 +       recalc_sigpending();
4676 +#else
4677 +       recalc_sigpending(current);
4678 +#endif
4679 +       spin_unlock_irq(&current->sigmask_lock);
4680 +
4681 +       /* loop around looking for things to attend to */
4682 +       do {
4683 +               set_current_state(TASK_INTERRUPTIBLE);
4684 +               add_wait_queue(&kafsasyncd_sleepq,&myself);
4685 +
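+               /* sleep until there's an operation needing attention, a signal arrives or we're asked to die */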
4686 +               for (;;) {
4687 +                       if (!list_empty(&kafsasyncd_async_attnq) ||
4688 +                           signal_pending(current) ||
4689 +                           kafsasyncd_die)
4690 +                               break;
4691 +
4692 +                       schedule();
4693 +                       set_current_state(TASK_INTERRUPTIBLE);
4694 +               }
4695 +
4696 +               remove_wait_queue(&kafsasyncd_sleepq,&myself);
4697 +               set_current_state(TASK_RUNNING);
4698 +
4699 +               /* discard pending signals */
4700 +               while (signal_pending(current)) {
4701 +                       siginfo_t sinfo;
4702 +
4703 +                       spin_lock_irq(&current->sigmask_lock);
4704 +                       dequeue_signal(&current->blocked,&sinfo);
4705 +                       spin_unlock_irq(&current->sigmask_lock);
4706 +               }
4707 +
4708 +               die = kafsasyncd_die;
4709 +
4710 +               /* deal with the next asynchronous operation requiring attention */
4711 +               if (!list_empty(&kafsasyncd_async_attnq)) {
4712 +                       struct afs_async_op *op;
4713 +
4714 +                       _debug("@@@ Begin Asynchronous Operation");
4715 +
4716 +                       op = NULL;
4717 +                       spin_lock(&kafsasyncd_async_lock);
4718 +
4719 +                       if (!list_empty(&kafsasyncd_async_attnq)) {
4720 +                               op = list_entry(kafsasyncd_async_attnq.next,afs_async_op_t,link);
4721 +                               list_del(&op->link);
4722 +                               list_add_tail(&op->link,&kafsasyncd_async_busyq);
4723 +                       }
4724 +
4725 +                       spin_unlock(&kafsasyncd_async_lock);
4726 +
4727 +                       _debug("@@@ Operation %p {%p}\n",op,op?op->ops:NULL);
4728 +
4729 +                       if (op)
4730 +                               op->ops->attend(op);
4731 +
4732 +                       _debug("@@@ End Asynchronous Operation");
4733 +               }
4734 +
4735 +       } while(!die);
4736 +
4737 +       /* need to kill all outstanding asynchronous operations before exiting */
4738 +       kafsasyncd_task = NULL;
4739 +       spin_lock(&kafsasyncd_async_lock);
4740 +
4741 +       /* fold the busy and attention queues together */
4742 +       list_splice(&kafsasyncd_async_busyq,&kafsasyncd_async_attnq);
4743 +       list_del_init(&kafsasyncd_async_busyq);
4744 +
4745 +       /* remove kafsasyncd from the wait queue of each outstanding call */
4746 +       list_for_each(_p,&kafsasyncd_async_attnq) {
4747 +               afs_async_op_t *op = list_entry(_p,afs_async_op_t,link);
4748 +
4749 +               op->call->app_attn_func = kafsasyncd_null_call_attn_func;
4750 +               op->call->app_error_func = kafsasyncd_null_call_error_func;
4751 +               remove_wait_queue(&op->call->waitq,&op->waiter);
4752 +       }
4753 +
4754 +       spin_unlock(&kafsasyncd_async_lock);
4755 +
4756 +       /* abort all the operations */
4757 +       while (!list_empty(&kafsasyncd_async_attnq)) {
4758 +               afs_async_op_t *op = list_entry(kafsasyncd_async_attnq.next,afs_async_op_t,link);
4759 +               list_del_init(&op->link);
4760 +
4761 +               rxrpc_call_abort(op->call,-EIO);
4762 +               rxrpc_put_call(op->call);
4763 +               op->call = NULL;
4764 +
4765 +               op->ops->discard(op);
4766 +       }
4767 +
4768 +       /* and that's all */
4769 +       _leave("");
4770 +       complete_and_exit(&kafsasyncd_dead,0);
4771 +
4772 +} /* end kafsasyncd() */
4773 +
4774 +/*****************************************************************************/
4775 +/*
4776 + * begin an operation
4777 + * - place operation on busy queue
4778 + */
4779 +void afs_kafsasyncd_begin_op(afs_async_op_t *op)
4780 +{
4781 +       _enter("");
4782 +
4783 +       spin_lock(&kafsasyncd_async_lock);
4784 +
4785 +       init_waitqueue_entry(&op->waiter,kafsasyncd_task);
4786 +
4787 +       list_del(&op->link);
4788 +       list_add_tail(&op->link,&kafsasyncd_async_busyq);
4789 +
4790 +       spin_unlock(&kafsasyncd_async_lock);
4791 +
4792 +       _leave("");
4793 +} /* end afs_kafsasyncd_begin_op() */
4794 +
4795 +/*****************************************************************************/
4796 +/*
4797 + * request attention for an operation
4798 + * - move to attention queue
4799 + */
4800 +void afs_kafsasyncd_attend_op(afs_async_op_t *op)
4801 +{
4802 +       _enter("");
4803 +
4804 +       spin_lock(&kafsasyncd_async_lock);
4805 +
4806 +       list_del(&op->link);
4807 +       list_add_tail(&op->link,&kafsasyncd_async_attnq);
4808 +
4809 +       spin_unlock(&kafsasyncd_async_lock);
4810 +
4811 +       wake_up(&kafsasyncd_sleepq);
4812 +
4813 +       _leave("");
4814 +} /* end afs_kafsasyncd_attend_op() */
4815 +
4816 +/*****************************************************************************/
4817 +/*
4818 + * terminate an operation
4819 + * - remove from either queue
4820 + */
4821 +void afs_kafsasyncd_terminate_op(afs_async_op_t *op)
4822 +{
4823 +       _enter("");
4824 +
4825 +       spin_lock(&kafsasyncd_async_lock);
4826 +
4827 +       list_del_init(&op->link);
4828 +
4829 +       spin_unlock(&kafsasyncd_async_lock);
4830 +
4831 +       wake_up(&kafsasyncd_sleepq);
4832 +
4833 +       _leave("");
4834 +} /* end afs_kafsasyncd_terminate_op() */
4835 diff -urNp linux-5240/fs/afs/kafsasyncd.h linux-5250/fs/afs/kafsasyncd.h
4836 --- linux-5240/fs/afs/kafsasyncd.h      1970-01-01 01:00:00.000000000 +0100
4837 +++ linux-5250/fs/afs/kafsasyncd.h      
4838 @@ -0,0 +1,49 @@
4839 +/* kafsasyncd.h: AFS asynchronous operation daemon
4840 + *
4841 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4842 + * Written by David Howells (dhowells@redhat.com)
4843 + *
4844 + * This program is free software; you can redistribute it and/or
4845 + * modify it under the terms of the GNU General Public License
4846 + * as published by the Free Software Foundation; either version
4847 + * 2 of the License, or (at your option) any later version.
4848 + */
4849 +
4850 +#ifndef _LINUX_AFS_KAFSASYNCD_H
4851 +#define _LINUX_AFS_KAFSASYNCD_H
4852 +
4853 +#include "types.h"
4854 +
4855 +struct afs_async_op_ops {
4856 +       void (*attend)(afs_async_op_t *op);
4857 +       void (*discard)(afs_async_op_t *op);
4858 +};
4859 +
4860 +/*****************************************************************************/
4861 +/*
4862 + * asynchronous operation record
4863 + */
4864 +struct afs_async_op
4865 +{
4866 +       struct list_head                link;
4867 +       afs_server_t                    *server;        /* server being contacted */
4868 +       struct rxrpc_call               *call;          /* RxRPC call performing op */
4869 +       wait_queue_t                    waiter;         /* wait queue for kafsasyncd */
4870 +       const struct afs_async_op_ops   *ops;           /* operations */
4871 +};
4872 +
4873 +static inline void afs_async_op_init(afs_async_op_t *op, const struct afs_async_op_ops *ops)
4874 +{
4875 +       INIT_LIST_HEAD(&op->link);
4876 +       op->call = NULL;
4877 +       op->ops = ops;
4878 +}
4879 +
4880 +extern int afs_kafsasyncd_start(void);
4881 +extern void afs_kafsasyncd_stop(void);
4882 +
4883 +extern void afs_kafsasyncd_begin_op(afs_async_op_t *op);
4884 +extern void afs_kafsasyncd_attend_op(afs_async_op_t *op);
4885 +extern void afs_kafsasyncd_terminate_op(afs_async_op_t *op);
4886 +
4887 +#endif /* _LINUX_AFS_KAFSASYNCD_H */
4888 diff -urNp linux-5240/fs/afs/kafstimod.c linux-5250/fs/afs/kafstimod.c
4889 --- linux-5240/fs/afs/kafstimod.c       1970-01-01 01:00:00.000000000 +0100
4890 +++ linux-5250/fs/afs/kafstimod.c       
4891 @@ -0,0 +1,217 @@
4892 +/* kafstimod.c: AFS timeout daemon
4893 + *
4894 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4895 + * Written by David Howells (dhowells@redhat.com)
4896 + *
4897 + * This program is free software; you can redistribute it and/or
4898 + * modify it under the terms of the GNU General Public License
4899 + * as published by the Free Software Foundation; either version
4900 + * 2 of the License, or (at your option) any later version.
4901 + */
4902 +
4903 +#include <linux/version.h>
4904 +#include <linux/module.h>
4905 +#include <linux/init.h>
4906 +#include <linux/sched.h>
4907 +#include <linux/completion.h>
4908 +#include "cell.h"
4909 +#include "volume.h"
4910 +#include "kafstimod.h"
4911 +#include <asm/errno.h>
4912 +#include "internal.h"
4913 +
4914 +static DECLARE_COMPLETION(kafstimod_alive);
4915 +static DECLARE_COMPLETION(kafstimod_dead);
4916 +static DECLARE_WAIT_QUEUE_HEAD(kafstimod_sleepq);
4917 +static int kafstimod_die;
4918 +
4919 +static LIST_HEAD(kafstimod_list);
4920 +static spinlock_t kafstimod_lock = SPIN_LOCK_UNLOCKED;
4921 +
4922 +static int kafstimod(void *arg);
4923 +
4924 +/*****************************************************************************/
4925 +/*
4926 + * start the timeout daemon
4927 + */
4928 +int afs_kafstimod_start(void)
4929 +{
4930 +       int ret;
4931 +
4932 +       ret = kernel_thread(kafstimod,NULL,0);
4933 +       if (ret<0)
4934 +               return ret;
4935 +
4936 +       wait_for_completion(&kafstimod_alive);
4937 +
4938 +       return ret;
4939 +} /* end afs_kafstimod_start() */
4940 +
4941 +/*****************************************************************************/
4942 +/*
4943 + * stop the timeout daemon
4944 + */
4945 +void afs_kafstimod_stop(void)
4946 +{
4947 +       /* get rid of my daemon */
4948 +       kafstimod_die = 1;
4949 +       wake_up(&kafstimod_sleepq);
4950 +       wait_for_completion(&kafstimod_dead);
4951 +
4952 +} /* end afs_kafstimod_stop() */
4953 +
4954 +/*****************************************************************************/
4955 +/*
4956 + * timeout processing daemon
4957 + */
4958 +static int kafstimod(void *arg)
4959 +{
4960 +       DECLARE_WAITQUEUE(myself,current);
4961 +
4962 +       afs_timer_t *timer;
4963 +
4964 +       printk("kAFS: Started kafstimod %d\n",current->pid);
4965 +       strcpy(current->comm,"kafstimod");
4966 +
4967 +       daemonize();
4968 +
4969 +       complete(&kafstimod_alive);
4970 +
4971 +       /* only certain signals are of interest */
4972 +       spin_lock_irq(&current->sigmask_lock);
4973 +       siginitsetinv(&current->blocked,0);
4974 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
4975 +       recalc_sigpending();
4976 +#else
4977 +       recalc_sigpending(current);
4978 +#endif
4979 +       spin_unlock_irq(&current->sigmask_lock);
4980 +
4981 +       /* loop around looking for things to attend to */
4982 + loop:
4983 +       set_current_state(TASK_INTERRUPTIBLE);
4984 +       add_wait_queue(&kafstimod_sleepq,&myself);
4985 +
4986 +       for (;;) {
4987 +               unsigned long jif;
4988 +               signed long timeout;
4989 +
4990 +               /* deal with the server being asked to die */
4991 +               if (kafstimod_die) {
4992 +                       remove_wait_queue(&kafstimod_sleepq,&myself);
4993 +                       _leave("");
4994 +                       complete_and_exit(&kafstimod_dead,0);
4995 +               }
4996 +
4997 +               /* discard pending signals */
4998 +               while (signal_pending(current)) {
4999 +                       siginfo_t sinfo;
5000 +
5001 +                       spin_lock_irq(&current->sigmask_lock);
5002 +                       dequeue_signal(&current->blocked,&sinfo);
5003 +                       spin_unlock_irq(&current->sigmask_lock);
5004 +               }
5005 +
5006 +               /* work out the time to elapse before the next event */
5007 +               spin_lock(&kafstimod_lock);
5008 +               if (list_empty(&kafstimod_list)) {
5009 +                       timeout = MAX_SCHEDULE_TIMEOUT;
5010 +               }
5011 +               else {
5012 +                       timer = list_entry(kafstimod_list.next,afs_timer_t,link);
5013 +                       timeout = timer->timo_jif;
5014 +                       jif = jiffies;
5015 +
5016 +                       if (time_before_eq(timeout,jif)) {
5017 +                               goto immediate;
5018 +                       }
5019 +                       else {
5020 +                               timeout = (long)timeout - (long)jiffies;
5021 +                       }
5022 +               }
5023 +               spin_unlock(&kafstimod_lock);
5024 +
5025 +               schedule_timeout(timeout);
5026 +
5027 +               set_current_state(TASK_INTERRUPTIBLE);
5028 +       }
5029 +
5030 +       /* the thing on the front of the queue needs processing
5031 +        * - we come here with the lock held and timer pointing to the expired entry
5032 +        */
5033 + immediate:
5034 +       remove_wait_queue(&kafstimod_sleepq,&myself);
5035 +       set_current_state(TASK_RUNNING);
5036 +
5037 +       _debug("@@@ Begin Timeout of %p",timer);
5038 +
5039 +       /* dequeue the timer */
5040 +       list_del_init(&timer->link);
5041 +       spin_unlock(&kafstimod_lock);
5042 +
5043 +       /* call the timeout function */
5044 +       timer->ops->timed_out(timer);
5045 +
5046 +       _debug("@@@ End Timeout");
5047 +       goto loop;
5048 +
5049 +} /* end kafstimod() */
5050 +
5051 +/*****************************************************************************/
5052 +/*
5053 + * (re-)queue a timer
5054 + */
5055 +void afs_kafstimod_add_timer(afs_timer_t *timer, unsigned long timeout)
5056 +{
5057 +       struct list_head *_p;
5058 +       afs_timer_t *ptimer;
5059 +
5060 +       _enter("%p,%lu",timer,timeout);
5061 +
5062 +       spin_lock(&kafstimod_lock);
5063 +
5064 +       list_del(&timer->link);
5065 +
5066 +       /* the timer was deferred or reset - put it back in the queue at the right place */
5067 +       timer->timo_jif = jiffies + timeout;
5068 +
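+       /* the queue is kept sorted by increasing expiry time, so walk it to find the insertion point */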
5069 +       list_for_each(_p,&kafstimod_list) {
5070 +               ptimer = list_entry(_p,afs_timer_t,link);
5071 +               if (time_before(timer->timo_jif,ptimer->timo_jif))
5072 +                       break;
5073 +       }
5074 +
5075 +       list_add_tail(&timer->link,_p); /* insert before stopping point */
5076 +
5077 +       spin_unlock(&kafstimod_lock);
5078 +
5079 +       wake_up(&kafstimod_sleepq);
5080 +
5081 +       _leave("");
5082 +} /* end afs_kafstimod_add_timer() */
5083 +
5084 +/*****************************************************************************/
5085 +/*
5086 + * dequeue a timer
5087 + * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued
5088 + */
5089 +int afs_kafstimod_del_timer(afs_timer_t *timer)
5090 +{
5091 +       int ret = 0;
5092 +
5093 +       _enter("%p",timer);
5094 +
5095 +       spin_lock(&kafstimod_lock);
5096 +
5097 +       if (list_empty(&timer->link))
5098 +               ret = -ENOENT;
5099 +       else
5100 +               list_del_init(&timer->link);
5101 +
5102 +       spin_unlock(&kafstimod_lock);
5103 +
5104 +       wake_up(&kafstimod_sleepq);
5105 +
5106 +       _leave(" = %d",ret);
5107 +       return ret;
5108 +} /* end afs_kafstimod_del_timer() */
5109 diff -urNp linux-5240/fs/afs/kafstimod.h linux-5250/fs/afs/kafstimod.h
5110 --- linux-5240/fs/afs/kafstimod.h       1970-01-01 01:00:00.000000000 +0100
5111 +++ linux-5250/fs/afs/kafstimod.h       
5112 @@ -0,0 +1,45 @@
5113 +/* kafstimod.h: AFS timeout daemon
5114 + *
5115 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5116 + * Written by David Howells (dhowells@redhat.com)
5117 + *
5118 + * This program is free software; you can redistribute it and/or
5119 + * modify it under the terms of the GNU General Public License
5120 + * as published by the Free Software Foundation; either version
5121 + * 2 of the License, or (at your option) any later version.
5122 + */
5123 +
5124 +#ifndef _LINUX_AFS_KAFSTIMOD_H
5125 +#define _LINUX_AFS_KAFSTIMOD_H
5126 +
5127 +#include "types.h"
5128 +
5129 +struct afs_timer_ops {
5130 +       /* called when the front of the timer queue has timed out */
5131 +       void (*timed_out)(struct afs_timer *timer);
5132 +};
5133 +
5134 +/*****************************************************************************/
5135 +/*
5136 + * AFS timer/timeout record
5137 + */
5138 +struct afs_timer
5139 +{
5140 +       struct list_head                link;           /* link in timer queue */
5141 +       unsigned long                   timo_jif;       /* timeout time */
5142 +       const struct afs_timer_ops      *ops;           /* timeout expiry function */
5143 +};
5144 +
5145 +static inline void afs_timer_init(afs_timer_t *timer, const struct afs_timer_ops *ops)
5146 +{
5147 +       INIT_LIST_HEAD(&timer->link);
5148 +       timer->ops = ops;
5149 +}
5150 +
5151 +extern int afs_kafstimod_start(void);
5152 +extern void afs_kafstimod_stop(void);
5153 +
5154 +extern void afs_kafstimod_add_timer(afs_timer_t *timer, unsigned long timeout);
5155 +extern int afs_kafstimod_del_timer(afs_timer_t *timer);
5156 +
5157 +#endif /* _LINUX_AFS_KAFSTIMOD_H */
5158 diff -urNp linux-5240/fs/afs/main.c linux-5250/fs/afs/main.c
5159 --- linux-5240/fs/afs/main.c    1970-01-01 01:00:00.000000000 +0100
5160 +++ linux-5250/fs/afs/main.c    
5161 @@ -0,0 +1,193 @@
5162 +/* main.c: AFS client file system
5163 + *
5164 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5165 + * Written by David Howells (dhowells@redhat.com)
5166 + *
5167 + * This program is free software; you can redistribute it and/or
5168 + * modify it under the terms of the GNU General Public License
5169 + * as published by the Free Software Foundation; either version
5170 + * 2 of the License, or (at your option) any later version.
5171 + */
5172 +
5173 +#include <linux/module.h>
5174 +#include <linux/init.h>
5175 +#include <linux/sched.h>
5176 +#include <linux/completion.h>
5177 +#include <rxrpc/rxrpc.h>
5178 +#include <rxrpc/transport.h>
5179 +#include <rxrpc/call.h>
5180 +#include <rxrpc/peer.h>
5181 +#include "cell.h"
5182 +#include "server.h"
5183 +#include "fsclient.h"
5184 +#include "cmservice.h"
5185 +#include "kafstimod.h"
5186 +#include "kafsasyncd.h"
5187 +#include "internal.h"
5188 +
5189 +struct rxrpc_transport *afs_transport;
5190 +
5191 +static int afs_init(void);
5192 +static void afs_exit(void);
5193 +static int afs_adding_peer(struct rxrpc_peer *peer);
5194 +static void afs_discarding_peer(struct rxrpc_peer *peer);
5195 +
5196 +module_init(afs_init);
5197 +module_exit(afs_exit);
5198 +
5199 +MODULE_DESCRIPTION("AFS Client File System");
5200 +MODULE_AUTHOR("Red Hat, Inc.");
5201 +MODULE_LICENSE("GPL");
5202 +
5203 +static struct rxrpc_peer_ops afs_peer_ops = {
5204 +       adding:         afs_adding_peer,
5205 +       discarding:     afs_discarding_peer,
5206 +};
5207 +
5208 +struct list_head afs_cb_hash_tbl[AFS_CB_HASH_COUNT];
5209 +spinlock_t afs_cb_hash_lock = SPIN_LOCK_UNLOCKED;
5210 +
5211 +/*****************************************************************************/
5212 +/*
5213 + * initialise the AFS client FS module
5214 + */
5215 +static int afs_init(void)
5216 +{
5217 +       int loop, ret;
5218 +
5219 +       printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");
5220 +
5221 +       /* initialise the callback hash table */
5222 +       spin_lock_init(&afs_cb_hash_lock);
5223 +       for (loop=AFS_CB_HASH_COUNT-1; loop>=0; loop--)
5224 +               INIT_LIST_HEAD(&afs_cb_hash_tbl[loop]);
5225 +
5226 +       /* register the /proc stuff */
5227 +       ret = afs_proc_init();
5228 +       if (ret<0)
5229 +               return ret;
5230 +
5231 +       /* initialise the cell DB */
5232 +       ret = afs_cell_init();
5233 +       if (ret<0)
5234 +               goto error;
5235 +
5236 +       /* start the timeout daemon */
5237 +       ret = afs_kafstimod_start();
5238 +       if (ret<0)
5239 +               goto error;
5240 +
5241 +       /* start the async operation daemon */
5242 +       ret = afs_kafsasyncd_start();
5243 +       if (ret<0)
5244 +               goto error_kafstimod;
5245 +
5246 +       /* create the RxRPC transport */
5247 +       ret = rxrpc_create_transport(7001,&afs_transport);
5248 +       if (ret<0)
5249 +               goto error_kafsasyncd;
5250 +
5251 +       afs_transport->peer_ops = &afs_peer_ops;
5252 +
5253 +       /* register the filesystems */
5254 +       ret = afs_fs_init();
5255 +       if (ret<0)
5256 +               goto error_transport;
5257 +
5258 +       return ret;
5259 +
5260 + error_transport:
5261 +       rxrpc_put_transport(afs_transport);
5262 + error_kafsasyncd:
5263 +       afs_kafsasyncd_stop();
5264 + error_kafstimod:
5265 +       afs_kafstimod_stop();
5266 + error:
5267 +       afs_cell_purge();
5268 +       afs_proc_cleanup();
5269 +       printk(KERN_ERR "kAFS: failed to register: %d\n",ret);
5270 +       return ret;
5271 +} /* end afs_init() */
5272 +
5273 +/*****************************************************************************/
5274 +/*
5275 + * clean up on module removal
5276 + */
5277 +static void afs_exit(void)
5278 +{
5279 +       printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n");
5280 +
5281 +       afs_fs_exit();
5282 +       rxrpc_put_transport(afs_transport);
5283 +       afs_kafstimod_stop();
5284 +       afs_kafsasyncd_stop();
5285 +       afs_cell_purge();
5286 +       afs_proc_cleanup();
5287 +
5288 +} /* end afs_exit() */
5289 +
5290 +/*****************************************************************************/
5291 +/*
5292 + * notification that new peer record is being added
5293 + * - called from krxsecd
5294 + * - return an error to induce an abort
5295 + * - mustn't sleep (caller holds an rwlock)
5296 + */
5297 +static int afs_adding_peer(struct rxrpc_peer *peer)
5298 +{
5299 +       afs_server_t *server;
5300 +       int ret;
5301 +
5302 +       _debug("kAFS: Adding new peer %08x\n",ntohl(peer->addr.s_addr));
5303 +
5304 +       /* determine which server the peer resides in (if any) */
5305 +       ret = afs_server_find_by_peer(peer,&server);
5306 +       if (ret<0)
5307 +               return ret; /* none that we recognise, so abort */
5308 +
5309 +       _debug("Server %p{u=%d}\n",server,atomic_read(&server->usage));
5310 +
5311 +       _debug("Cell %p{u=%d}\n",server->cell,atomic_read(&server->cell->usage));
5312 +
5313 +       /* cross-point the structs under a global lock */
5314 +       spin_lock(&afs_server_peer_lock);
5315 +       peer->user = server;
5316 +       server->peer = peer;
5317 +       spin_unlock(&afs_server_peer_lock);
5318 +
5319 +       afs_put_server(server);
5320 +
5321 +       return 0;
5322 +} /* end afs_adding_peer() */
5323 +
5324 +/*****************************************************************************/
5325 +/*
5326 + * notification that a peer record is being discarded
5327 + * - called from krxiod or krxsecd
5328 + */
5329 +static void afs_discarding_peer(struct rxrpc_peer *peer)
5330 +{
5331 +       afs_server_t *server;
5332 +
5333 +       _enter("%p",peer);
5334 +
5335 +       _debug("Discarding peer %08x (rtt=%lu.%lumS)\n",
5336 +              ntohl(peer->addr.s_addr),
5337 +              peer->rtt/1000,
5338 +              peer->rtt%1000);
5339 +
5340 +       /* uncross-point the structs under a global lock */
5341 +       spin_lock(&afs_server_peer_lock);
5342 +       server = peer->user;
5343 +       if (server) {
5344 +               peer->user = NULL;
5345 +               server->peer = NULL;
5346 +
5347 +               //_debug("Server %p{u=%d}\n",server,atomic_read(&server->usage));
5348 +               //_debug("Cell %p{u=%d}\n",server->cell,atomic_read(&server->cell->usage));
5349 +       }
5350 +       spin_unlock(&afs_server_peer_lock);
5351 +
5352 +       _leave("");
5353 +
5354 +} /* end afs_discarding_peer() */
5355 diff -urNp linux-5240/fs/afs/Makefile linux-5250/fs/afs/Makefile
5356 --- linux-5240/fs/afs/Makefile  1970-01-01 01:00:00.000000000 +0100
5357 +++ linux-5250/fs/afs/Makefile  
5358 @@ -0,0 +1,34 @@
5359 +#
5360 +# Makefile for Red Hat Linux AFS client.
5361 +#
5362 +
5363 +kafs-objs := \
5364 +       callback.o \
5365 +       cell.o \
5366 +       cmservice.o \
5367 +       dir.o \
5368 +       file.o \
5369 +       fsclient.o \
5370 +       inode.o \
5371 +       kafsasyncd.o \
5372 +       kafstimod.o \
5373 +       main.o \
5374 +       misc.o \
5375 +       mntpt.o \
5376 +       proc.o \
5377 +       server.o \
5378 +       super.o \
5379 +       vlclient.o \
5380 +       vlocation.o \
5381 +       vnode.o \
5382 +       volume.o
5383 +
5384 +#      cache.o
5385 +
5386 +obj-m  := kafs.o
5387 +
5388 +# superfluous for 2.5, but needed for 2.4..
5389 +kafs.o: $(kafs-objs)
5390 +       $(LD) -r -o kafs.o $(kafs-objs)
5391 +
5392 +include $(TOPDIR)/Rules.make
5393 diff -urNp linux-5240/fs/afs/misc.c linux-5250/fs/afs/misc.c
5394 --- linux-5240/fs/afs/misc.c    1970-01-01 01:00:00.000000000 +0100
5395 +++ linux-5250/fs/afs/misc.c    
5396 @@ -0,0 +1,39 @@
5397 +/* misc.c: miscellaneous bits
5398 + *
5399 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5400 + * Written by David Howells (dhowells@redhat.com)
5401 + *
5402 + * This program is free software; you can redistribute it and/or
5403 + * modify it under the terms of the GNU General Public License
5404 + * as published by the Free Software Foundation; either version
5405 + * 2 of the License, or (at your option) any later version.
5406 + */
5407 +
5408 +#include <linux/kernel.h>
5409 +#include <linux/module.h>
5410 +#include <linux/errno.h>
5411 +#include "errors.h"
5412 +#include "internal.h"
5413 +
5414 +/*****************************************************************************/
5415 +/*
5416 + * convert an AFS abort code to a Linux error number
5417 + */
5418 +int afs_abort_to_error(int abortcode)
5419 +{
5420 +       switch (abortcode) {
5421 +       case VSALVAGE:          return -EIO;
5422 +       case VNOVNODE:          return -ENOENT;
5423 +       case VNOVOL:            return -ENXIO;
5424 +       case VVOLEXISTS:        return -EEXIST;
5425 +       case VNOSERVICE:        return -EIO;
5426 +       case VOFFLINE:          return -ENOENT;
5427 +       case VONLINE:           return -EEXIST;
5428 +       case VDISKFULL:         return -ENOSPC;
5429 +       case VOVERQUOTA:        return -EDQUOT;
5430 +       case VBUSY:             return -EBUSY;
5431 +       case VMOVED:            return -ENXIO;
5432 +       default:                return -EIO;
5433 +       }
5434 +
5435 +} /* end afs_abort_to_error() */
5436 diff -urNp linux-5240/fs/afs/mntpt.c linux-5250/fs/afs/mntpt.c
5437 --- linux-5240/fs/afs/mntpt.c   1970-01-01 01:00:00.000000000 +0100
5438 +++ linux-5250/fs/afs/mntpt.c   
5439 @@ -0,0 +1,114 @@
5440 +/* mntpt.c: mountpoint management
5441 + *
5442 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5443 + * Written by David Howells (dhowells@redhat.com)
5444 + *
5445 + * This program is free software; you can redistribute it and/or
5446 + * modify it under the terms of the GNU General Public License
5447 + * as published by the Free Software Foundation; either version
5448 + * 2 of the License, or (at your option) any later version.
5449 + */
5450 +
5451 +#include <linux/kernel.h>
5452 +#include <linux/module.h>
5453 +#include <linux/init.h>
5454 +#include <linux/sched.h>
5455 +#include <linux/slab.h>
5456 +#include <linux/fs.h>
5457 +#include <linux/pagemap.h>
5458 +#include "volume.h"
5459 +#include "vnode.h"
5460 +#include "cache.h"
5461 +#include "internal.h"
5462 +
5463 +
5464 +static struct dentry *afs_mntpt_lookup(struct inode *dir, struct dentry *dentry);
5465 +static int afs_mntpt_open(struct inode *inode, struct file *file);
5466 +//static int afs_mntpt_readlink(struct dentry *dentry, char *buf, int size);
5467 +
5468 +struct file_operations afs_mntpt_file_operations = {
5469 +       open:           afs_mntpt_open,
5470 +};
5471 +
5472 +struct inode_operations afs_mntpt_inode_operations = {
5473 +       lookup:         afs_mntpt_lookup,
5474 +       readlink:       page_readlink,
5475 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
5476 +       getattr:        afs_inode_getattr,
5477 +#else
5478 +       revalidate:     afs_inode_revalidate,
5479 +#endif
5480 +};
5481 +
5482 +/*****************************************************************************/
5483 +/*
5484 + * check a symbolic link to see whether it actually encodes a mountpoint
5485 + * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
5486 + */
5487 +int afs_mntpt_check_symlink(afs_vnode_t *vnode)
5488 +{
5489 +       struct page *page;
5490 +       size_t size;
5491 +       char *buf;
5492 +       int ret;
5493 +
5494 +       _enter("{%u,%u}",vnode->fid.vnode,vnode->fid.unique);
5495 +
5496 +       /* read the contents of the symlink into the pagecache */
5497 +       page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping,0,
5498 +                              (filler_t*)AFS_VNODE_TO_I(vnode)->i_mapping->a_ops->readpage,NULL);
5499 +       if (IS_ERR(page)) {
5500 +               ret = PTR_ERR(page);
5501 +               goto out;
5502 +       }
5503 +
5504 +       ret = -EIO;
5505 +       wait_on_page_locked(page);
5506 +       buf = kmap(page);
5507 +       if (!PageUptodate(page))
5508 +               goto out_free;
5509 +       if (PageError(page))
5510 +               goto out_free;
5511 +
5512 +       /* examine the symlink's contents */
5513 +       size = vnode->status.size;
5514 +       _debug("symlink to %*.*s",size,size,buf);
5515 +
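+       /* a mountpoint is a symlink whose body starts with '%' or '#' and ends with a '.' */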
5516 +       if (size>2 &&
5517 +           (buf[0]=='%' || buf[0]=='#') &&
5518 +           buf[size-1]=='.'
5519 +           ) {
5520 +               _debug("symlink is a mountpoint");
5521 +               spin_lock(&vnode->lock);
5522 +               vnode->flags |= AFS_VNODE_MOUNTPOINT;
5523 +               spin_unlock(&vnode->lock);
5524 +       }
5525 +
5526 +       ret = 0;
5527 +
5528 + out_free:
5529 +       kunmap(page);
5530 +       page_cache_release(page);
5531 + out:
5532 +       _leave(" = %d",ret);
5533 +       return ret;
5534 +
5535 +} /* end afs_mntpt_check_symlink() */
5536 +
5537 +/*****************************************************************************/
5538 +/*
5539 + * no valid lookup procedure on this sort of dir
5540 + */
5541 +static struct dentry *afs_mntpt_lookup(struct inode *dir, struct dentry *dentry)
5542 +{
5543 +       return ERR_PTR(-EREMOTE);
5544 +} /* end afs_mntpt_lookup() */
5545 +
5546 +/*****************************************************************************/
5547 +/*
5548 + * no valid open procedure on this sort of dir
5549 + */
5550 +static int afs_mntpt_open(struct inode *inode, struct file *file)
5551 +{
5552 +       return -EREMOTE;
5553 +} /* end afs_mntpt_open() */
5554 diff -urNp linux-5240/fs/afs/mount.h linux-5250/fs/afs/mount.h
5555 --- linux-5240/fs/afs/mount.h   1970-01-01 01:00:00.000000000 +0100
5556 +++ linux-5250/fs/afs/mount.h   
5557 @@ -0,0 +1,23 @@
5558 +/* mount.h: mount parameters
5559 + *
5560 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5561 + * Written by David Howells (dhowells@redhat.com)
5562 + *
5563 + * This program is free software; you can redistribute it and/or
5564 + * modify it under the terms of the GNU General Public License
5565 + * as published by the Free Software Foundation; either version
5566 + * 2 of the License, or (at your option) any later version.
5567 + */
5568 +
5569 +#ifndef _LINUX_AFS_MOUNT_H
5570 +#define _LINUX_AFS_MOUNT_H
5571 +
5572 +struct afs_mountdata {
5573 +       const char              *volume;        /* name of volume */
5574 +       const char              *cell;          /* name of cell containing volume */
5575 +       const char              *cache;         /* name of cache block device */
5576 +       size_t                  nservers;       /* number of server addresses listed */
5577 +       u_int32_t               servers[10];    /* IP addresses of servers in this cell */
5578 +};
5579 +
5580 +#endif /* _LINUX_AFS_MOUNT_H */
5581 diff -urNp linux-5240/fs/afs/proc.c linux-5250/fs/afs/proc.c
5582 --- linux-5240/fs/afs/proc.c    1970-01-01 01:00:00.000000000 +0100
5583 +++ linux-5250/fs/afs/proc.c    
5584 @@ -0,0 +1,744 @@
5585 +/* proc.c: /proc interface for AFS
5586 + *
5587 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5588 + * Written by David Howells (dhowells@redhat.com)
5589 + *
5590 + * This program is free software; you can redistribute it and/or
5591 + * modify it under the terms of the GNU General Public License
5592 + * as published by the Free Software Foundation; either version
5593 + * 2 of the License, or (at your option) any later version.
5594 + */
5595 +
5596 +#include <linux/sched.h>
5597 +#include <linux/slab.h>
5598 +#include <linux/module.h>
5599 +#include <linux/proc_fs.h>
5600 +#include <linux/seq_file.h>
5601 +#include "cell.h"
5602 +#include "volume.h"
5603 +#include <asm/uaccess.h>
5604 +#include "internal.h"
5605 +
5606 +static struct proc_dir_entry *proc_afs;
5607 +
5608 +
5609 +static int afs_proc_cells_open(struct inode *inode, struct file *file);
5610 +static void *afs_proc_cells_start(struct seq_file *p, loff_t *pos);
5611 +static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos);
5612 +static void afs_proc_cells_stop(struct seq_file *p, void *v);
5613 +static int afs_proc_cells_show(struct seq_file *m, void *v);
5614 +static ssize_t afs_proc_cells_write(struct file *file, const char *buf, size_t size, loff_t *_pos);
5615 +
5616 +static struct seq_operations afs_proc_cells_ops = {
5617 +       start:  afs_proc_cells_start,
5618 +       next:   afs_proc_cells_next,
5619 +       stop:   afs_proc_cells_stop,
5620 +       show:   afs_proc_cells_show,
5621 +};
5622 +
5623 +static struct file_operations afs_proc_cells_fops = {
5624 +       open:           afs_proc_cells_open,
5625 +       read:           seq_read,
5626 +       write:          afs_proc_cells_write,
5627 +       llseek:         seq_lseek,
5628 +       release:        seq_release,
5629 +};
5630 +
5631 +static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file);
5632 +static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file);
5633 +static void *afs_proc_cell_volumes_start(struct seq_file *p, loff_t *pos);
5634 +static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, loff_t *pos);
5635 +static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v);
5636 +static int afs_proc_cell_volumes_show(struct seq_file *m, void *v);
5637 +
5638 +static struct seq_operations afs_proc_cell_volumes_ops = {
5639 +       start:  afs_proc_cell_volumes_start,
5640 +       next:   afs_proc_cell_volumes_next,
5641 +       stop:   afs_proc_cell_volumes_stop,
5642 +       show:   afs_proc_cell_volumes_show,
5643 +};
5644 +
5645 +static struct file_operations afs_proc_cell_volumes_fops = {
5646 +       open:           afs_proc_cell_volumes_open,
5647 +       read:           seq_read,
5648 +       llseek:         seq_lseek,
5649 +       release:        afs_proc_cell_volumes_release,
5650 +};
5651 +
5652 +static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file);
5653 +static int afs_proc_cell_vlservers_release(struct inode *inode, struct file *file);
5654 +static void *afs_proc_cell_vlservers_start(struct seq_file *p, loff_t *pos);
5655 +static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, loff_t *pos);
5656 +static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v);
5657 +static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v);
5658 +
5659 +static struct seq_operations afs_proc_cell_vlservers_ops = {
5660 +       start:  afs_proc_cell_vlservers_start,
5661 +       next:   afs_proc_cell_vlservers_next,
5662 +       stop:   afs_proc_cell_vlservers_stop,
5663 +       show:   afs_proc_cell_vlservers_show,
5664 +};
5665 +
5666 +static struct file_operations afs_proc_cell_vlservers_fops = {
5667 +       open:           afs_proc_cell_vlservers_open,
5668 +       read:           seq_read,
5669 +       llseek:         seq_lseek,
5670 +       release:        afs_proc_cell_vlservers_release,
5671 +};
5672 +
5673 +static int afs_proc_cell_servers_open(struct inode *inode, struct file *file);
5674 +static int afs_proc_cell_servers_release(struct inode *inode, struct file *file);
5675 +static void *afs_proc_cell_servers_start(struct seq_file *p, loff_t *pos);
5676 +static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, loff_t *pos);
5677 +static void afs_proc_cell_servers_stop(struct seq_file *p, void *v);
5678 +static int afs_proc_cell_servers_show(struct seq_file *m, void *v);
5679 +
5680 +static struct seq_operations afs_proc_cell_servers_ops = {
5681 +       start:  afs_proc_cell_servers_start,
5682 +       next:   afs_proc_cell_servers_next,
5683 +       stop:   afs_proc_cell_servers_stop,
5684 +       show:   afs_proc_cell_servers_show,
5685 +};
5686 +
5687 +static struct file_operations afs_proc_cell_servers_fops = {
5688 +       open:           afs_proc_cell_servers_open,
5689 +       read:           seq_read,
5690 +       llseek:         seq_lseek,
5691 +       release:        afs_proc_cell_servers_release,
5692 +};
5693 +
5694 +/*****************************************************************************/
5695 +/*
5696 + * initialise the /proc/fs/afs/ directory
5697 + */
5698 +int afs_proc_init(void)
5699 +{
5700 +       struct proc_dir_entry *p;
5701 +
5702 +       _enter("");
5703 +
5704 +       proc_afs = proc_mkdir("fs/afs",NULL);
5705 +       if (!proc_afs)
5706 +               goto error;
5707 +       proc_afs->owner = THIS_MODULE;
5708 +
5709 +       p = create_proc_entry("cells",0,proc_afs);
5710 +       if (!p)
5711 +               goto error_proc;
5712 +       p->proc_fops = &afs_proc_cells_fops;
5713 +       p->owner = THIS_MODULE;
5714 +
5715 +       _leave(" = 0");
5716 +       return 0;
5717 +
5718 +#if 0
5719 + error_cells:
5720 +       remove_proc_entry("cells",proc_afs);
5721 +#endif
5722 + error_proc:
5723 +       remove_proc_entry("fs/afs",NULL);
5724 + error:
5725 +       _leave(" = -ENOMEM");
5726 +       return -ENOMEM;
5727 +
5728 +} /* end afs_proc_init() */
5729 +
5730 +/*****************************************************************************/
5731 +/*
5732 + * clean up the /proc/fs/afs/ directory
5733 + */
5734 +void afs_proc_cleanup(void)
5735 +{
5736 +       remove_proc_entry("cells",proc_afs);
5737 +
5738 +       remove_proc_entry("fs/afs",NULL);
5739 +
5740 +} /* end afs_proc_cleanup() */
5741 +
5742 +/*****************************************************************************/
5743 +/*
5744 + * open "/proc/fs/afs/cells" which provides a summary of extant cells
5745 + */
5746 +static int afs_proc_cells_open(struct inode *inode, struct file *file)
5747 +{
5748 +       struct seq_file *m;
5749 +       int ret;
5750 +
5751 +       ret = seq_open(file,&afs_proc_cells_ops);
5752 +       if (ret<0)
5753 +               return ret;
5754 +
5755 +       m = file->private_data;
5756 +       m->private = PDE(inode)->data;
5757 +
5758 +       return 0;
5759 +} /* end afs_proc_cells_open() */
5760 +
5761 +/*****************************************************************************/
5762 +/*
5763 + * set up the iterator to start reading from the cells list and return the first item
5764 + */
5765 +static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
5766 +{
5767 +       struct list_head *_p;
5768 +       loff_t pos = *_pos;
5769 +
5770 +       /* lock the list against modification */
5771 +       down_read(&afs_proc_cells_sem);
5772 +
5773 +       /* allow for the header line */
5774 +       if (!pos)
5775 +               return (void *)1;
5776 +       pos--;
5777 +
5778 +       /* find the n'th element in the list */
5779 +       list_for_each(_p,&afs_proc_cells)
5780 +               if (!pos--)
5781 +                       break;
5782 +
5783 +       return _p!=&afs_proc_cells ? _p : NULL;
5784 +} /* end afs_proc_cells_start() */
5785 +
5786 +/*****************************************************************************/
5787 +/*
5788 + * move to next cell in cells list
5789 + */
5790 +static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos)
5791 +{
5792 +       struct list_head *_p;
5793 +
5794 +       (*pos)++;
5795 +
5796 +       _p = v;
5797 +       _p = v==(void*)1 ? afs_proc_cells.next : _p->next;
5798 +
5799 +       return _p!=&afs_proc_cells ? _p : NULL;
5800 +} /* end afs_proc_cells_next() */
5801 +
5802 +/*****************************************************************************/
5803 +/*
5804 + * clean up after reading from the cells list
5805 + */
5806 +static void afs_proc_cells_stop(struct seq_file *p, void *v)
5807 +{
5808 +       up_read(&afs_proc_cells_sem);
5809 +
5810 +} /* end afs_proc_cells_stop() */
5811 +
5812 +/*****************************************************************************/
5813 +/*
5814 + * display a header line followed by a load of cell lines
5815 + */
5816 +static int afs_proc_cells_show(struct seq_file *m, void *v)
5817 +{
5818 +       afs_cell_t *cell = list_entry(v,afs_cell_t,proc_link);
5819 +
5820 +       /* display header on line 1 */
5821 +       if (v == (void *)1) {
5822 +               seq_puts(m, "USE IX  NAME\n");
5823 +               return 0;
5824 +       }
5825 +
5826 +       /* display one cell per line on subsequent lines */
5827 +       seq_printf(m,"%3d %3u %s\n",
5828 +                  atomic_read(&cell->usage),
5829 +                  cell->cache_ix,
5830 +                  cell->name
5831 +                  );
5832 +
5833 +       return 0;
5834 +} /* end afs_proc_cells_show() */
5835 +
5836 +/*****************************************************************************/
5837 +/*
5838 + * handle writes to /proc/fs/afs/cells
5839 + * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]*" >/proc/fs/afs/cells
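+ * - e.g. (hypothetical cell name and addresses): echo "add example.com 10.0.0.1:10.0.0.2" >/proc/fs/afs/cells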
5840 + */
5841 +static ssize_t afs_proc_cells_write(struct file *file, const char *buf, size_t size, loff_t *_pos)
5842 +{
5843 +       char *kbuf, *name, *args;
5844 +       int ret;
5845 +
5846 +       /* start by dragging the command into memory */
5847 +       if (size<=1 || size>=PAGE_SIZE)
5848 +               return -EINVAL;
5849 +
5850 +       kbuf = kmalloc(size+1,GFP_KERNEL);
5851 +       if (!kbuf)
5852 +               return -ENOMEM;
5853 +
5854 +       ret = -EFAULT;
5855 +       if (copy_from_user(kbuf,buf,size)!=0)
5856 +               goto done;
5857 +       kbuf[size] = 0;
5858 +
5859 +       /* trim to first NL */
5860 +       name = memchr(kbuf,'\n',size);
5861 +       if (name) *name = 0;
5862 +
5863 +       /* split into command, name and argslist */
5864 +       name = strchr(kbuf,' ');
5865 +       if (!name) goto inval;
5866 +       do { *name++ = 0; } while(*name==' ');
5867 +       if (!*name) goto inval;
5868 +
5869 +       args = strchr(name,' ');
5870 +       if (!args) goto inval;
5871 +       do { *args++ = 0; } while(*args==' ');
5872 +       if (!*args) goto inval;
5873 +
5874 +       /* determine command to perform */
5875 +       _debug("cmd=%s name=%s args=%s",kbuf,name,args);
5876 +
5877 +       if (strcmp(kbuf,"add")==0) {
5878 +               afs_cell_t *cell;
5879 +               ret = afs_cell_create(name,args,&cell);
5880 +               if (ret<0)
5881 +                       goto done;
5882 +
5883 +               printk("kAFS: Added new cell '%s'\n",name);
5884 +       }
5885 +       else {
5886 +               goto inval;
5887 +       }
5888 +
5889 +       ret = size;
5890 +
5891 + done:
5892 +       kfree(kbuf);
5893 +       _leave(" = %d",ret);
5894 +       return ret;
5895 +
5896 + inval:
5897 +       ret = -EINVAL;
5898 +       printk("kAFS: Invalid Command on /proc/fs/afs/cells file\n");
5899 +       goto done;
5900 +} /* end afs_proc_cells_write() */
5901 +
5902 +/*****************************************************************************/
5903 +/*
5904 + * initialise /proc/fs/afs/<cell>/
5905 + */
5906 +int afs_proc_cell_setup(afs_cell_t *cell)
5907 +{
5908 +       struct proc_dir_entry *p;
5909 +
5910 +       _enter("%p{%s}",cell,cell->name);
5911 +
5912 +       cell->proc_dir = proc_mkdir(cell->name,proc_afs);
5913 +       if (!cell->proc_dir)
5914 +               return -ENOMEM;
5915 +
5916 +       p = create_proc_entry("servers",0,cell->proc_dir);
5917 +       if (!p)
5918 +               goto error_proc;
5919 +       p->proc_fops = &afs_proc_cell_servers_fops;
5920 +       p->owner = THIS_MODULE;
5921 +       p->data = cell;
5922 +
5923 +       p = create_proc_entry("vlservers",0,cell->proc_dir);
5924 +       if (!p)
5925 +               goto error_servers;
5926 +       p->proc_fops = &afs_proc_cell_vlservers_fops;
5927 +       p->owner = THIS_MODULE;
5928 +       p->data = cell;
5929 +
5930 +       p = create_proc_entry("volumes",0,cell->proc_dir);
5931 +       if (!p)
5932 +               goto error_vlservers;
5933 +       p->proc_fops = &afs_proc_cell_volumes_fops;
5934 +       p->owner = THIS_MODULE;
5935 +       p->data = cell;
5936 +
5937 +       _leave(" = 0");
5938 +       return 0;
5939 +
5940 + error_vlservers:
5941 +       remove_proc_entry("vlservers",cell->proc_dir);
5942 + error_servers:
5943 +       remove_proc_entry("servers",cell->proc_dir);
5944 + error_proc:
5945 +       remove_proc_entry(cell->name,proc_afs);
5946 +       _leave(" = -ENOMEM");
5947 +       return -ENOMEM;
5948 +} /* end afs_proc_cell_setup() */
5949 +
5950 +/*****************************************************************************/
5951 +/*
5952 + * remove /proc/fs/afs/<cell>/
5953 + */
5954 +void afs_proc_cell_remove(afs_cell_t *cell)
5955 +{
5956 +       _enter("");
5957 +
5958 +       remove_proc_entry("volumes",cell->proc_dir);
5959 +       remove_proc_entry("vlservers",cell->proc_dir);
5960 +       remove_proc_entry("servers",cell->proc_dir);
5961 +       remove_proc_entry(cell->name,proc_afs);
5962 +
5963 +       _leave("");
5964 +} /* end afs_proc_cell_remove() */
5965 +
5966 +/*****************************************************************************/
5967 +/*
5968 + * open "/proc/fs/afs/<cell>/volumes" which provides a summary of the cell's extant volumes
5969 + */
5970 +static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file)
5971 +{
5972 +       struct seq_file *m;
5973 +       afs_cell_t *cell;
5974 +       int ret;
5975 +
5976 +       cell = afs_get_cell_maybe((afs_cell_t**)&PDE(inode)->data);
5977 +       if (!cell)
5978 +               return -ENOENT;
5979 +
5980 +       ret = seq_open(file,&afs_proc_cell_volumes_ops);
5981 +       if (ret<0)
5982 +               return ret;
5983 +
5984 +       m = file->private_data;
5985 +       m->private = cell;
5986 +
5987 +       return 0;
5988 +} /* end afs_proc_cell_volumes_open() */
5989 +
5990 +/*****************************************************************************/
5991 +/*
5992 + * close the file and release the ref to the cell
5993 + */
5994 +static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file)
5995 +{
5996 +       afs_cell_t *cell = PDE(inode)->data;
5997 +       int ret;
5998 +
5999 +       ret = seq_release(inode,file);
6000 +
6001 +       afs_put_cell(cell);
6002 +       return ret;
6003 +} /* end afs_proc_cell_volumes_release() */
6004 +
6005 +/*****************************************************************************/
6006 +/*
6007 + * set up the iterator to start reading from the cell's volume list and return the first item
6008 + */
6009 +static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
6010 +{
6011 +       struct list_head *_p;
6012 +       afs_cell_t *cell = m->private;
6013 +       loff_t pos = *_pos;
6014 +
6015 +       _enter("cell=%p pos=%Ld",cell,*_pos);
6016 +
6017 +       /* lock the list against modification */
6018 +       down_read(&cell->vl_sem);
6019 +
6020 +       /* allow for the header line */
6021 +       if (!pos)
6022 +               return (void *)1;
6023 +       pos--;
6024 +
6025 +       /* find the n'th element in the list */
6026 +       list_for_each(_p,&cell->vl_list)
6027 +               if (!pos--)
6028 +                       break;
6029 +
6030 +       return _p!=&cell->vl_list ? _p : NULL;
6031 +} /* end afs_proc_cell_volumes_start() */
6032 +
6033 +/*****************************************************************************/
6034 +/*
6035 + * move to the next volume in the cell's volume list
6036 + */
6037 +static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, loff_t *_pos)
6038 +{
6039 +       struct list_head *_p;
6040 +       afs_cell_t *cell = p->private;
6041 +
6042 +       _enter("cell=%p pos=%Ld",cell,*_pos);
6043 +
6044 +       (*_pos)++;
6045 +
6046 +       _p = v;
6047 +       _p = v==(void*)1 ? cell->vl_list.next : _p->next;
6048 +
6049 +       return _p!=&cell->vl_list ? _p : NULL;
6050 +} /* end afs_proc_cell_volumes_next() */
6051 +
6052 +/*****************************************************************************/
6053 +/*
6054 + * clean up after reading from the cell's volume list
6055 + */
6056 +static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v)
6057 +{
6058 +       afs_cell_t *cell = p->private;
6059 +
6060 +       up_read(&cell->vl_sem);
6061 +
6062 +} /* end afs_proc_cell_volumes_stop() */
6063 +
6064 +/*****************************************************************************/
6065 +/*
6066 + * display a header line followed by a load of volume lines
6067 + */
6068 +static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
6069 +{
6070 +       afs_vlocation_t *vlocation = list_entry(v,afs_vlocation_t,link);
6071 +
6072 +       /* display header on line 1 */
6073 +       if (v == (void *)1) {
6074 +               seq_puts(m, "USE IX  VLID[0]  VLID[1]  VLID[2]  NAME\n");
6075 +               return 0;
6076 +       }
6077 +
6078 +       /* display one volume per line on subsequent lines */
6079 +       seq_printf(m,"%3d %3hu %08x %08x %08x %s\n",
6080 +                  atomic_read(&vlocation->usage),
6081 +                  vlocation->vix.index,
6082 +                  vlocation->vldb.vid[0],
6083 +                  vlocation->vldb.vid[1],
6084 +                  vlocation->vldb.vid[2],
6085 +                  vlocation->vldb.name
6086 +                  );
6087 +
6088 +       return 0;
6089 +} /* end afs_proc_cell_volumes_show() */
6090 +
6091 +/*****************************************************************************/
6092 +/*
6093 + * open "/proc/fs/afs/<cell>/vlservers" which provides a list of volume location servers
6094 + */
6095 +static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file)
6096 +{
6097 +       struct seq_file *m;
6098 +       afs_cell_t *cell;
6099 +       int ret;
6100 +
6101 +       cell = afs_get_cell_maybe((afs_cell_t**)&PDE(inode)->data);
6102 +       if (!cell)
6103 +               return -ENOENT;
6104 +
6105 +       ret = seq_open(file,&afs_proc_cell_vlservers_ops);
6106 +       if (ret<0)
6107 +               return ret;
6108 +
6109 +       m = file->private_data;
6110 +       m->private = cell;
6111 +
6112 +       return 0;
6113 +} /* end afs_proc_cell_vlservers_open() */
6114 +
6115 +/*****************************************************************************/
6116 +/*
6117 + * close the file and release the ref to the cell
6118 + */
6119 +static int afs_proc_cell_vlservers_release(struct inode *inode, struct file *file)
6120 +{
6121 +       afs_cell_t *cell = PDE(inode)->data;
6122 +       int ret;
6123 +
6124 +       ret = seq_release(inode,file);
6125 +
6126 +       afs_put_cell(cell);
6127 +       return ret;
6128 +} /* end afs_proc_cell_vlservers_release() */
6129 +
6130 +/*****************************************************************************/
6131 +/*
6132 + * set up the iterator to start reading from the cell's VL server address list and return the first item
6133 + */
6134 +static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
6135 +{
6136 +       afs_cell_t *cell = m->private;
6137 +       loff_t pos = *_pos;
6138 +
6139 +       _enter("cell=%p pos=%Ld",cell,*_pos);
6140 +
6141 +       /* lock the list against modification */
6142 +       down_read(&cell->vl_sem);
6143 +
6144 +       /* allow for the header line */
6145 +       if (!pos)
6146 +               return (void *)1;
6147 +       pos--;
6148 +
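+       /* the VL server addresses are kept in a fixed-size array rather than a list, so the position indexes it directly */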
6149 +       if (pos>=cell->vl_naddrs)
6150 +               return NULL;
6151 +
6152 +       return &cell->vl_addrs[pos];
6153 +} /* end afs_proc_cell_vlservers_start() */
6154 +
6155 +/*****************************************************************************/
6156 +/*
6157 + * move to the next VL server address in the cell's list
6158 + */
6159 +static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, loff_t *_pos)
6160 +{
6161 +       afs_cell_t *cell = p->private;
6162 +       loff_t pos;
6163 +
6164 +       _enter("cell=%p{nad=%u} pos=%Ld",cell,cell->vl_naddrs,*_pos);
6165 +
6166 +       pos = *_pos;
6167 +       (*_pos)++;
6168 +       if (pos>=cell->vl_naddrs)
6169 +               return NULL;
6170 +
6171 +       return &cell->vl_addrs[pos];
6172 +} /* end afs_proc_cell_vlservers_next() */
6173 +
6174 +/*****************************************************************************/
6175 +/*
6176 + * clean up after reading from the VL server address list
6177 + */
6178 +static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v)
6179 +{
6180 +       afs_cell_t *cell = p->private;
6181 +
6182 +       up_read(&cell->vl_sem);
6183 +
6184 +} /* end afs_proc_cell_vlservers_stop() */
6185 +
6186 +/*****************************************************************************/
6187 +/*
6188 + * display a header line followed by a load of VL server address lines
6189 + */
6190 +static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
6191 +{
6192 +       struct in_addr *addr = v;
6193 +
6194 +       /* display header on line 1 */
6195 +       if (v == (struct in_addr *)1) {
6196 +               seq_puts(m,"ADDRESS\n");
6197 +               return 0;
6198 +       }
6199 +
6200 +       /* display one address per line on subsequent lines */
6201 +       seq_printf(m,"%u.%u.%u.%u\n",NIPQUAD(addr->s_addr));
6202 +
6203 +       return 0;
6204 +} /* end afs_proc_cell_vlservers_show() */
6205 +
6206 +/*****************************************************************************/
6207 +/*
6208 + * open "/proc/fs/afs/<cell>/servers" which provides a summary of active servers
6209 + */
6210 +static int afs_proc_cell_servers_open(struct inode *inode, struct file *file)
6211 +{
6212 +       struct seq_file *m;
6213 +       afs_cell_t *cell;
6214 +       int ret;
6215 +
6216 +       cell = afs_get_cell_maybe((afs_cell_t**)&PDE(inode)->data);
6217 +       if (!cell)
6218 +               return -ENOENT;
6219 +
6220 +       ret = seq_open(file,&afs_proc_cell_servers_ops);
6221 +       if (ret<0)
6222 +               return ret;
6223 +
6224 +       m = file->private_data;
6225 +       m->private = cell;
6226 +
6227 +       return 0;
6228 +} /* end afs_proc_cell_servers_open() */
6229 +
6230 +/*****************************************************************************/
6231 +/*
6232 + * close the file and release the ref to the cell
6233 + */
6234 +static int afs_proc_cell_servers_release(struct inode *inode, struct file *file)
6235 +{
6236 +       afs_cell_t *cell = PDE(inode)->data;
6237 +       int ret;
6238 +
6239 +       ret = seq_release(inode,file);
6240 +
6241 +       afs_put_cell(cell);
6242 +       return ret;
6243 +} /* end afs_proc_cell_servers_release() */
6244 +
6245 +/*****************************************************************************/
6246 +/*
6247 + * set up the iterator to start reading from the cell's server list and return the first item
6248 + */
6249 +static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos)
6250 +{
6251 +       struct list_head *_p;
6252 +       afs_cell_t *cell = m->private;
6253 +       loff_t pos = *_pos;
6254 +
6255 +       _enter("cell=%p pos=%Ld",cell,*_pos);
6256 +
6257 +       /* lock the list against modification */
6258 +       read_lock(&cell->sv_lock);
6259 +
6260 +       /* allow for the header line */
6261 +       if (!pos)
6262 +               return (void *)1;
6263 +       pos--;
6264 +
6265 +       /* find the n'th element in the list */
6266 +       list_for_each(_p,&cell->sv_list)
6267 +               if (!pos--)
6268 +                       break;
6269 +
6270 +       return _p!=&cell->sv_list ? _p : NULL;
6271 +} /* end afs_proc_cell_servers_start() */
6272 +
6273 +/*****************************************************************************/
6274 +/*
6275 + * move to the next server in the cell's server list
6276 + */
6277 +static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, loff_t *_pos)
6278 +{
6279 +       struct list_head *_p;
6280 +       afs_cell_t *cell = p->private;
6281 +
6282 +       _enter("cell=%p pos=%Ld",cell,*_pos);
6283 +
6284 +       (*_pos)++;
6285 +
6286 +       _p = v;
6287 +       _p = v==(void*)1 ? cell->sv_list.next : _p->next;
6288 +
6289 +       return _p!=&cell->sv_list ? _p : NULL;
6290 +} /* end afs_proc_cell_servers_next() */
6291 +
6292 +/*****************************************************************************/
6293 +/*
6294 + * clean up after reading from the server list
6295 + */
6296 +static void afs_proc_cell_servers_stop(struct seq_file *p, void *v)
6297 +{
6298 +       afs_cell_t *cell = p->private;
6299 +
6300 +       read_unlock(&cell->sv_lock);
6301 +
6302 +} /* end afs_proc_cell_servers_stop() */
6303 +
6304 +/*****************************************************************************/
6305 +/*
6306 + * display a header line followed by a load of server lines
6307 + */
6308 +static int afs_proc_cell_servers_show(struct seq_file *m, void *v)
6309 +{
6310 +       afs_server_t *server = list_entry(v,afs_server_t,link);
6311 +       char ipaddr[20];
6312 +
6313 +       /* display header on line 1 */
6314 +       if (v == (void *)1) {
6315 +               seq_puts(m, "USE ADDR            STATE\n");
6316 +               return 0;
6317 +       }
6318 +
6319 +       /* display one server per line on subsequent lines */
6320 +       sprintf(ipaddr,"%u.%u.%u.%u",NIPQUAD(server->addr));
6321 +       seq_printf(m,"%3d %-15.15s %5d\n",
6322 +                  atomic_read(&server->usage),
6323 +                  ipaddr,
6324 +                  server->fs_state
6325 +                  );
6326 +
6327 +       return 0;
6328 +} /* end afs_proc_cell_servers_show() */
6329 diff -urNp linux-5240/fs/afs/server.c linux-5250/fs/afs/server.c
6330 --- linux-5240/fs/afs/server.c  1970-01-01 01:00:00.000000000 +0100
6331 +++ linux-5250/fs/afs/server.c  
6332 @@ -0,0 +1,490 @@
6333 +/* server.c: AFS server record management
6334 + *
6335 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
6336 + * Written by David Howells (dhowells@redhat.com)
6337 + *
6338 + * This program is free software; you can redistribute it and/or
6339 + * modify it under the terms of the GNU General Public License
6340 + * as published by the Free Software Foundation; either version
6341 + * 2 of the License, or (at your option) any later version.
6342 + */
6343 +
6344 +#include <linux/sched.h>
6345 +#include <linux/slab.h>
6346 +#include <rxrpc/peer.h>
6347 +#include <rxrpc/connection.h>
6348 +#include "volume.h"
6349 +#include "cell.h"
6350 +#include "server.h"
6351 +#include "transport.h"
6352 +#include "cache.h"
6353 +#include "vlclient.h"
6354 +#include "kafstimod.h"
6355 +#include "internal.h"
6356 +
6357 +spinlock_t afs_server_peer_lock = SPIN_LOCK_UNLOCKED;
6358 +
6359 +#define FS_SERVICE_ID          1       /* AFS File Service ID */
6360 +#define VL_SERVICE_ID          52      /* AFS Volume Location Service ID */
6361 +
6362 +static void __afs_server_timeout(afs_timer_t *timer)
6363 +{
6364 +       afs_server_t *server = list_entry(timer,afs_server_t,timeout);
6365 +
6366 +       _debug("SERVER TIMEOUT [%p{u=%d}]",server,atomic_read(&server->usage));
6367 +
6368 +       afs_server_do_timeout(server);
6369 +}
6370 +
6371 +static const struct afs_timer_ops afs_server_timer_ops = {
6372 +       timed_out:      __afs_server_timeout,
6373 +};
6374 +
6375 +/*****************************************************************************/
6376 +/*
6377 + * lookup a server record in a cell
6378 + * - TODO: search the cell's server list
6379 + */
6380 +int afs_server_lookup(afs_cell_t *cell, const struct in_addr *addr, afs_server_t **_server)
6381 +{
6382 +       struct list_head *_p;
6383 +       afs_server_t *server, *active, *zombie;
6384 +       int loop;
6385 +
6386 +       _enter("%p,%08x,",cell,ntohl(addr->s_addr));
6387 +
6388 +       /* allocate and initialise a server record */
6389 +       server = kmalloc(sizeof(afs_server_t),GFP_KERNEL);
6390 +       if (!server) {
6391 +               _leave(" = -ENOMEM");
6392 +               return -ENOMEM;
6393 +       }
6394 +
6395 +       memset(server,0,sizeof(afs_server_t));
6396 +       atomic_set(&server->usage,1);
6397 +
6398 +       INIT_LIST_HEAD(&server->link);
6399 +       init_rwsem(&server->sem);
6400 +       INIT_LIST_HEAD(&server->fs_callq);
6401 +       spin_lock_init(&server->fs_lock);
6402 +       INIT_LIST_HEAD(&server->cb_promises);
6403 +       spin_lock_init(&server->cb_lock);
6404 +
6405 +       for (loop=0; loop<AFS_SERVER_CONN_LIST_SIZE; loop++)
6406 +               server->fs_conn_cnt[loop] = 4;
6407 +
6408 +       memcpy(&server->addr,addr,sizeof(struct in_addr));
6409 +       server->addr.s_addr = addr->s_addr;
6410 +
6411 +       afs_timer_init(&server->timeout,&afs_server_timer_ops);
6412 +
6413 +       /* add to the cell */
6414 +       write_lock(&cell->sv_lock);
6415 +
6416 +       /* check the active list */
6417 +       list_for_each(_p,&cell->sv_list) {
6418 +               active = list_entry(_p,afs_server_t,link);
6419 +
6420 +               if (active->addr.s_addr==addr->s_addr)
6421 +                       goto use_active_server;
6422 +       }
6423 +
6424 +       /* check the inactive list */
6425 +       spin_lock(&cell->sv_gylock);
6426 +       list_for_each(_p,&cell->sv_graveyard) {
6427 +               zombie = list_entry(_p,afs_server_t,link);
6428 +
6429 +               if (zombie->addr.s_addr==addr->s_addr)
6430 +                       goto resurrect_server;
6431 +       }
6432 +       spin_unlock(&cell->sv_gylock);
6433 +
6434 +       afs_get_cell(cell);
6435 +       server->cell = cell;
6436 +       list_add_tail(&server->link,&cell->sv_list);
6437 +
6438 +       write_unlock(&cell->sv_lock);
6439 +
6440 +       *_server = server;
6441 +       _leave(" = 0 (%p)",server);
6442 +       return 0;
6443 +
6444 +       /* found a matching active server */
6445 + use_active_server:
6446 +       _debug("active server");
6447 +       afs_get_server(active);
6448 +       write_unlock(&cell->sv_lock);
6449 +
6450 +       kfree(server);
6451 +
6452 +       *_server = active;
6453 +       _leave(" = 0 (%p)",active);
6454 +       return 0;
6455 +
6456 +       /* found a matching server in the graveyard, so resurrect it and dispose of the new rec */
6457 + resurrect_server:
6458 +       _debug("resurrecting server");
6459 +
6460 +       list_del(&zombie->link);
6461 +       list_add_tail(&zombie->link,&cell->sv_list);
6462 +       afs_get_server(zombie);
6463 +       afs_kafstimod_del_timer(&zombie->timeout);
6464 +       spin_unlock(&cell->sv_gylock);
6465 +       write_unlock(&cell->sv_lock);
6466 +
6467 +       kfree(server);
6468 +
6469 +       *_server = zombie;
6470 +       _leave(" = 0 (%p)",zombie);
6471 +       return 0;
6472 +
6473 +} /* end afs_server_lookup() */
6474 +
6475 +/*****************************************************************************/
6476 +/*
6477 + * destroy a server record
6478 + * - removes from the cell list
6479 + */
6480 +void afs_put_server(afs_server_t *server)
6481 +{
6482 +       afs_cell_t *cell;
6483 +
6484 +       _enter("%p",server);
6485 +
6486 +       cell = server->cell;
6487 +
6488 +       /* sanity check */
6489 +       if (atomic_read(&server->usage)<=0)
6490 +               BUG();
6491 +
6492 +       /* to prevent a race, the decrement and the dequeue must be effectively atomic */
6493 +       write_lock(&cell->sv_lock);
6494 +
6495 +       if (likely(!atomic_dec_and_test(&server->usage))) {
6496 +               write_unlock(&cell->sv_lock);
6497 +               _leave("");
6498 +               return;
6499 +       }
6500 +
6501 +       spin_lock(&cell->sv_gylock);
6502 +       list_del(&server->link);
6503 +       list_add_tail(&server->link,&cell->sv_graveyard);
6504 +
6505 +       /* time out in 10 secs */
6506 +       afs_kafstimod_add_timer(&server->timeout,10*HZ);
6507 +
6508 +       spin_unlock(&cell->sv_gylock);
6509 +       write_unlock(&cell->sv_lock);
6510 +
6511 +       _leave(" [killed]");
6512 +} /* end afs_put_server() */
6513 +
6514 +/*****************************************************************************/
6515 +/*
6516 + * timeout server record
6517 + * - removes from the cell's graveyard if the usage count is zero
6518 + */
6519 +void afs_server_do_timeout(afs_server_t *server)
6520 +{
6521 +       struct rxrpc_peer *peer;
6522 +       afs_cell_t *cell;
6523 +       int loop;
6524 +
6525 +       _enter("%p",server);
6526 +
6527 +       cell = server->cell;
6528 +
6529 +       if (atomic_read(&server->usage)<0) BUG();
6530 +
6531 +       /* remove from graveyard if still dead */
6532 +       spin_lock(&cell->sv_gylock);
6533 +       if (atomic_read(&server->usage)==0)
6534 +               list_del_init(&server->link);
6535 +       else
6536 +               server = NULL;
6537 +       spin_unlock(&cell->sv_gylock);
6538 +
6539 +       if (!server) {
6540 +               _leave("");
6541 +               return; /* resurrected */
6542 +       }
6543 +
6544 +       /* we can now destroy it properly */
6545 +       afs_put_cell(cell);
6546 +
6547 +       /* uncross-point the structs under a global lock */
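+       /* this stops afs_server_get_from_peer() handing out a reference to the record being destroyed */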
6548 +       spin_lock(&afs_server_peer_lock);
6549 +       peer = server->peer;
6550 +       if (peer) {
6551 +               server->peer = NULL;
6552 +               peer->user = NULL;
6553 +       }
6554 +       spin_unlock(&afs_server_peer_lock);
6555 +
6556 +       /* finish cleaning up the server */
6557 +       for (loop=AFS_SERVER_CONN_LIST_SIZE-1; loop>=0; loop--)
6558 +               if (server->fs_conn[loop])
6559 +                       rxrpc_put_connection(server->fs_conn[loop]);
6560 +
6561 +       if (server->vlserver)
6562 +               rxrpc_put_connection(server->vlserver);
6563 +
6564 +       kfree(server);
6565 +
6566 +       _leave(" [destroyed]");
6567 +} /* end afs_server_do_timeout() */
6568 +
6569 +/*****************************************************************************/
6570 +/*
6571 + * get a callslot on a connection to the fileserver on the specified server
6572 + */
6573 +int afs_server_request_callslot(afs_server_t *server, struct afs_server_callslot *callslot)
6574 +{
6575 +       struct afs_server_callslot *pcallslot;
6576 +       struct rxrpc_connection *conn;
6577 +       int nconn, ret;
6578 +
6579 +       _enter("%p,",server);
6580 +
6581 +       INIT_LIST_HEAD(&callslot->link);
6582 +       callslot->task = current;
6583 +       callslot->conn = NULL;
6584 +       callslot->nconn = -1;
6585 +       callslot->ready = 0;
6586 +
6587 +       ret = 0;
6588 +       conn = NULL;
6589 +
6590 +       /* get hold of a callslot first */
6591 +       spin_lock(&server->fs_lock);
6592 +
6593 +       /* resurrect the server if its death timeout has expired */
6594 +       if (server->fs_state) {
6595 +               if (time_before(jiffies,server->fs_dead_jif)) {
6596 +                       ret = server->fs_state;
6597 +                       spin_unlock(&server->fs_lock);
6598 +                       _leave(" = %d [still dead]",ret);
6599 +                       return ret;
6600 +               }
6601 +
6602 +               server->fs_state = 0;
6603 +       }
6604 +
6605 +       /* try and find a connection that has spare callslots */
6606 +       for (nconn=0; nconn<AFS_SERVER_CONN_LIST_SIZE; nconn++) {
6607 +               if (server->fs_conn_cnt[nconn]>0) {
6608 +                       server->fs_conn_cnt[nconn]--;
6609 +                       spin_unlock(&server->fs_lock);
6610 +                       callslot->nconn = nconn;
6611 +                       goto obtained_slot;
6612 +               }
6613 +       }
6614 +
6615 +       /* none were available - wait interruptibly for one to become available */
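+       /* the task state must be set before queueing so that a wake-up issued between dropping fs_lock and calling schedule() is not lost */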
6616 +       set_current_state(TASK_INTERRUPTIBLE);
6617 +       list_add_tail(&callslot->link,&server->fs_callq);
6618 +       spin_unlock(&server->fs_lock);
6619 +
6620 +       while (!callslot->ready && !signal_pending(current)) {
6621 +               schedule();
6622 +               set_current_state(TASK_INTERRUPTIBLE);
6623 +       }
6624 +
6625 +       set_current_state(TASK_RUNNING);
6626 +
6627 +       /* even if we were interrupted we may still be queued */
6628 +       if (!callslot->ready) {
6629 +               spin_lock(&server->fs_lock);
6630 +               list_del_init(&callslot->link);
6631 +               spin_unlock(&server->fs_lock);
6632 +       }
6633 +
6634 +       nconn = callslot->nconn;
6635 +
6636 +       /* if interrupted, we must release any slot we were granted before returning an error */
6637 +       if (signal_pending(current)) {
6638 +               ret = -EINTR;
6639 +               goto error_release;
6640 +       }
6641 +
6642 +       /* if we were woken up with an error, then pass that error back to the caller */
6643 +       if (nconn<0) {
6644 +               _leave(" = %d",callslot->errno);
6645 +               return callslot->errno;
6646 +       }
6647 +
6648 +       /* were we given a connection directly? */
6649 +       if (callslot->conn) {
6650 +               /* yes - use it */
6651 +               _leave(" = 0 (nc=%d)",nconn);
6652 +               return 0;
6653 +       }
6654 +
6655 +       /* got a callslot, but no connection */
6656 + obtained_slot:
6657 +
6658 +       /* need to get hold of the RxRPC connection */
6659 +       down_write(&server->sem);
6660 +
6661 +       /* quick check to see if there's an outstanding error */
6662 +       ret = server->fs_state;
6663 +       if (ret)
6664 +               goto error_release_upw;
6665 +
6666 +       if (server->fs_conn[nconn]) {
6667 +               /* reuse an existing connection */
6668 +               rxrpc_get_connection(server->fs_conn[nconn]);
6669 +               callslot->conn = server->fs_conn[nconn];
6670 +       }
6671 +       else {
6672 +               /* create a new connection */
6673 +               ret = rxrpc_create_connection(afs_transport,
6674 +                                             htons(7000),
6675 +                                             server->addr.s_addr,
6676 +                                             FS_SERVICE_ID,
6677 +                                             NULL,
6678 +                                             &server->fs_conn[nconn]);
6679 +
6680 +               if (ret<0)
6681 +                       goto error_release_upw;
6682 +
6683 +               callslot->conn = server->fs_conn[nconn];
6684 +               rxrpc_get_connection(callslot->conn);
6685 +       }
6686 +
6687 +       up_write(&server->sem);
6688 +
6689 +       _leave(" = 0");
6690 +       return 0;
6691 +
6692 +       /* handle an error occurring */
6693 + error_release_upw:
6694 +       up_write(&server->sem);
6695 +
6696 + error_release:
6697 +       /* either release the callslot or pass it along to another deserving task */
6698 +       spin_lock(&server->fs_lock);
6699 +
6700 +       if (nconn<0) {
6701 +               /* no callslot allocated */
6702 +       }
6703 +       else if (list_empty(&server->fs_callq)) {
6704 +               /* no one waiting */
6705 +               server->fs_conn_cnt[nconn]++;
6706 +               spin_unlock(&server->fs_lock);
6707 +       }
6708 +       else {
6709 +               /* someone's waiting - dequeue them and wake them up */
6710 +               pcallslot = list_entry(server->fs_callq.next,struct afs_server_callslot,link);
6711 +               list_del_init(&pcallslot->link);
6712 +
6713 +               pcallslot->errno = server->fs_state;
6714 +               if (!pcallslot->errno) {
6715 +                       /* pass the callslot details out to them */
6716 +                       callslot->conn = xchg(&pcallslot->conn,callslot->conn);
6717 +                       pcallslot->nconn = nconn;
6718 +                       callslot->nconn = nconn = -1;
6719 +               }
6720 +               pcallslot->ready = 1;
6721 +               wake_up_process(pcallslot->task);
6722 +               spin_unlock(&server->fs_lock);
6723 +       }
6724 +
6725 +       if (callslot->conn) rxrpc_put_connection(callslot->conn);
6726 +       callslot->conn = NULL;
6727 +
6728 +       _leave(" = %d",ret);
6729 +       return ret;
6730 +
6731 +} /* end afs_server_request_callslot() */
6732 +
6733 +/*****************************************************************************/
6734 +/*
6735 + * release a callslot back to the server
6736 + * - transfers the RxRPC connection to the next pending callslot if possible
6737 + */
6738 +void afs_server_release_callslot(afs_server_t *server, struct afs_server_callslot *callslot)
6739 +{
6740 +       struct afs_server_callslot *pcallslot;
6741 +
6742 +       _enter("{ad=%08x,cnt=%u},{%d}",
6743 +              ntohl(server->addr.s_addr),
6744 +              server->fs_conn_cnt[callslot->nconn],
6745 +              callslot->nconn);
6746 +
6747 +       if (callslot->nconn<0) BUG();
6748 +
6749 +       spin_lock(&server->fs_lock);
6750 +
6751 +       if (list_empty(&server->fs_callq)) {
6752 +               /* no one waiting */
6753 +               server->fs_conn_cnt[callslot->nconn]++;
6754 +               spin_unlock(&server->fs_lock);
6755 +       }
6756 +       else {
6757 +               /* someone's waiting - dequeue them and wake them up */
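+               /* the callslot (and its connection, if any) is handed straight to the next waiter rather than being returned to the pool */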
6758 +               pcallslot = list_entry(server->fs_callq.next,struct afs_server_callslot,link);
6759 +               list_del_init(&pcallslot->link);
6760 +
6761 +               pcallslot->errno = server->fs_state;
6762 +               if (!pcallslot->errno) {
6763 +                       /* pass the callslot details out to them */
6764 +                       callslot->conn = xchg(&pcallslot->conn,callslot->conn);
6765 +                       pcallslot->nconn = callslot->nconn;
6766 +                       callslot->nconn = -1;
6767 +               }
6768 +
6769 +               pcallslot->ready = 1;
6770 +               wake_up_process(pcallslot->task);
6771 +               spin_unlock(&server->fs_lock);
6772 +       }
6773 +
6774 +       if (callslot->conn) rxrpc_put_connection(callslot->conn);
6775 +
6776 +       _leave("");
6777 +} /* end afs_server_release_callslot() */
6778 +
6779 +/*****************************************************************************/
6780 +/*
6781 + * get a handle to a connection to the vlserver (volume location) on the specified server
6782 + */
6783 +int afs_server_get_vlconn(afs_server_t *server, struct rxrpc_connection **_conn)
6784 +{
6785 +       struct rxrpc_connection *conn;
6786 +       int ret;
6787 +
6788 +       _enter("%p,",server);
6789 +
6790 +       ret = 0;
6791 +       conn = NULL;
6792 +       down_read(&server->sem);
6793 +
6794 +       if (server->vlserver) {
6795 +               /* reuse an existing connection */
6796 +               rxrpc_get_connection(server->vlserver);
6797 +               conn = server->vlserver;
6798 +               up_read(&server->sem);
6799 +       }
6800 +       else {
6801 +               /* create a new connection */
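+               /* the read lock must be dropped before the write lock can be taken, so vlserver is rechecked in case another task created it in the interim */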
6802 +               up_read(&server->sem);
6803 +               down_write(&server->sem);
6804 +               if (!server->vlserver) {
6805 +                       ret = rxrpc_create_connection(afs_transport,
6806 +                                                     htons(7003),
6807 +                                                     server->addr.s_addr,
6808 +                                                     VL_SERVICE_ID,
6809 +                                                     NULL,
6810 +                                                     &server->vlserver);
6811 +               }
6812 +               if (ret==0) {
6813 +                       rxrpc_get_connection(server->vlserver);
6814 +                       conn = server->vlserver;
6815 +               }
6816 +               up_write(&server->sem);
6817 +       }
6818 +
6819 +       *_conn = conn;
6820 +       _leave(" = %d",ret);
6821 +       return ret;
6822 +} /* end afs_server_get_vlconn() */
6823 diff -urNp linux-5240/fs/afs/server.h linux-5250/fs/afs/server.h
6824 --- linux-5240/fs/afs/server.h  1970-01-01 01:00:00.000000000 +0100
6825 +++ linux-5250/fs/afs/server.h  
6826 @@ -0,0 +1,97 @@
6827 +/* server.h: AFS server record
6828 + *
6829 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
6830 + * Written by David Howells (dhowells@redhat.com)
6831 + *
6832 + * This program is free software; you can redistribute it and/or
6833 + * modify it under the terms of the GNU General Public License
6834 + * as published by the Free Software Foundation; either version
6835 + * 2 of the License, or (at your option) any later version.
6836 + */
6837 +
6838 +#ifndef _LINUX_AFS_SERVER_H
6839 +#define _LINUX_AFS_SERVER_H
6840 +
6841 +#include "types.h"
6842 +#include "kafstimod.h"
6843 +#include <rxrpc/peer.h>
6844 +#include <linux/rwsem.h>
6845 +
6846 +extern spinlock_t afs_server_peer_lock;
6847 +
6848 +/*****************************************************************************/
6849 +/*
6850 + * AFS server record
6851 + */
6852 +struct afs_server
6853 +{
6854 +       atomic_t                usage;
6855 +       afs_cell_t              *cell;          /* cell in which server resides */
6856 +       struct list_head        link;           /* link in cell's server list */
6857 +       struct rw_semaphore     sem;            /* access lock */
6858 +       afs_timer_t             timeout;        /* graveyard timeout */
6859 +       struct in_addr          addr;           /* server address */
6860 +       struct rxrpc_peer       *peer;          /* peer record for this server */
6861 +       struct rxrpc_connection *vlserver;      /* connection to the volume location service */
6862 +
6863 +       /* file service access */
6864 +#define AFS_SERVER_CONN_LIST_SIZE 2
6865 +       struct rxrpc_connection *fs_conn[AFS_SERVER_CONN_LIST_SIZE]; /* FS connections */
6866 +       unsigned                fs_conn_cnt[AFS_SERVER_CONN_LIST_SIZE]; /* free call slots per conn */
6867 +       struct list_head        fs_callq;       /* queue of processes waiting to make a call */
6868 +       spinlock_t              fs_lock;        /* access lock */
6869 +       int                     fs_state;       /* 0 or reason FS currently marked dead (-errno) */
6870 +       unsigned                fs_rtt;         /* FS round trip time */
6871 +       unsigned long           fs_act_jif;     /* time at which last activity occurred */
6872 +       unsigned long           fs_dead_jif;    /* time at which no longer to be considered dead */
6873 +
6874 +       /* callback promise management */
6875 +       struct list_head        cb_promises;    /* as yet unbroken promises from this server */
6876 +       spinlock_t              cb_lock;        /* access lock */
6877 +};
6878 +
6879 +extern int afs_server_lookup(afs_cell_t *cell, const struct in_addr *addr, afs_server_t **_server);
6880 +
6881 +#define afs_get_server(S) do { atomic_inc(&(S)->usage); } while(0)
6882 +
6883 +extern void afs_put_server(afs_server_t *server);
6884 +extern void afs_server_do_timeout(afs_server_t *server);
6885 +
6886 +extern int afs_server_find_by_peer(const struct rxrpc_peer *peer, afs_server_t **_server);
6887 +
6888 +extern int afs_server_get_vlconn(afs_server_t *server, struct rxrpc_connection **_conn);
6889 +
6890 +static inline afs_server_t *afs_server_get_from_peer(struct rxrpc_peer *peer)
6891 +{
6892 +       afs_server_t *server;
6893 +
6894 +       spin_lock(&afs_server_peer_lock);
6895 +       server = peer->user;
6896 +       if (server)
6897 +               afs_get_server(server);
6898 +       spin_unlock(&afs_server_peer_lock);
6899 +
6900 +       return server;
6901 +}
6902 +
6903 +/*****************************************************************************/
6904 +/*
6905 + * AFS server callslot grant record
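+ * - a waiter queues one of these on the server's fs_callq and sleeps until ready is set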
6906 + */
6907 +struct afs_server_callslot
6908 +{
6909 +       struct list_head        link;           /* link in server's list */
6910 +       struct task_struct      *task;          /* process waiting to make call */
6911 +       struct rxrpc_connection *conn;          /* connection to use (or NULL on error) */
6912 +       short                   nconn;          /* connection slot number (-1 on error) */
6913 +       char                    ready;          /* T when ready */
6914 +       int                     errno;          /* error number if nconn==-1 */
6915 +};
6916 +
6917 +extern int afs_server_request_callslot(afs_server_t *server,
6918 +                                      struct afs_server_callslot *callslot);
6919 +
6920 +extern void afs_server_release_callslot(afs_server_t *server,
6921 +                                       struct afs_server_callslot *callslot);
6922 +
6923 +#endif /* _LINUX_AFS_SERVER_H */
6924 diff -urNp linux-5240/fs/afs/super.c linux-5250/fs/afs/super.c
6925 --- linux-5240/fs/afs/super.c   1970-01-01 01:00:00.000000000 +0100
6926 +++ linux-5250/fs/afs/super.c   
6927 @@ -0,0 +1,609 @@
6928 +/*
6929 + * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
6930 + *
6931 + * This software may be freely redistributed under the terms of the
6932 + * GNU General Public License.
6933 + *
6934 + * You should have received a copy of the GNU General Public License
6935 + * along with this program; if not, write to the Free Software
6936 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
6937 + *
6938 + * Authors: David Howells <dhowells@redhat.com>
6939 + *          David Woodhouse <dwmw2@cambridge.redhat.com>
6940 + *
6941 + */
6942 +
6943 +#include <linux/kernel.h>
6944 +#include <linux/module.h>
6945 +#include <linux/init.h>
6946 +#include <linux/slab.h>
6947 +#include <linux/fs.h>
6948 +#include <linux/pagemap.h>
6949 +#include "vnode.h"
6950 +#include "volume.h"
6951 +#include "cell.h"
6952 +#include "cmservice.h"
6953 +#include "fsclient.h"
6954 +#include "super.h"
6955 +#include "cache.h"
6956 +#include "internal.h"
6957 +
6958 +#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */
6959 +
6960 +static inline char *strdup(const char *s)
6961 +{
6962 +       char *ns = kmalloc(strlen(s)+1,GFP_KERNEL);
6963 +       if (ns)
6964 +               strcpy(ns,s);
6965 +       return ns;
6966 +}
6967 +
6968 +static void afs_i_init_once(void *foo, kmem_cache_t *cachep, unsigned long flags);
6969 +
6970 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
6971 +static struct super_block *afs_get_sb(struct file_system_type *fs_type,
6972 +                                     int flags, char *dev_name, void *data);
6973 +#else
6974 +static struct super_block *afs_read_super(struct super_block *sb, void *data, int);
6975 +static void afs_put_inode(struct inode *inode);
6976 +#endif
6977 +
6978 +static struct inode *afs_alloc_inode(struct super_block *sb);
6979 +
6980 +static void afs_put_super(struct super_block *sb);
6981 +
6982 +static void afs_destroy_inode(struct inode *inode);
6983 +
6984 +static struct file_system_type afs_fs_type = {
6985 +       owner:          THIS_MODULE,
6986 +       name:           "afs",
6987 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
6988 +       get_sb:         afs_get_sb,
6989 +       kill_sb:        kill_anon_super,
6990 +#else
6991 +       read_super:     afs_read_super,
6992 +#endif
6993 +};
6994 +
6995 +static struct super_operations afs_super_ops = {
6996 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
6997 +       statfs:         simple_statfs,
6998 +       alloc_inode:    afs_alloc_inode,
6999 +       drop_inode:     generic_delete_inode,
7000 +       destroy_inode:  afs_destroy_inode,
7001 +#else
7002 +       put_inode:      afs_put_inode,
7003 +       read_inode2:    afs_read_inode2,
7004 +#endif
7005 +       clear_inode:    afs_clear_inode,
7006 +       put_super:      afs_put_super,
7007 +};
7008 +
7009 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7010 +static kmem_cache_t *afs_inode_cachep;
7011 +#endif
7012 +
7013 +#if 0
7014 +static const char *cachedev;
7015 +#endif
7016 +static afs_cache_t *afs_cache;
7017 +
7018 +/*****************************************************************************/
7019 +/*
7020 + * initialise the filesystem
7021 + */
7022 +int __init afs_fs_init(void)
7023 +{
7024 +       int ret;
7025 +
7026 +       /* open the cache */
7027 +#if 0
7028 +       ret = -EINVAL;
7029 +       if (!cachedev) {
7030 +               printk(KERN_NOTICE "kAFS: No cache device specified as module parm\n");
7031 +               printk(KERN_NOTICE "kAFS: Set with \"cachedev=<devname>\" on insmod's cmdline\n");
7032 +               return ret;
7033 +       }
7034 +
7035 +       ret = afs_cache_open(cachedev,&afs_cache);
7036 +       if (ret<0) {
7037 +               printk(KERN_NOTICE "kAFS: Failed to open cache device\n");
7038 +               return ret;
7039 +       }
7040 +#endif
7041 +
7042 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7043 +       /* create ourselves an inode cache */
7044 +       ret = -ENOMEM;
7045 +       afs_inode_cachep = kmem_cache_create("afs_inode_cache",
7046 +                                               sizeof(afs_vnode_t),
7047 +                                               0,
7048 +                                               SLAB_HWCACHE_ALIGN,
7049 +                                               afs_i_init_once,
7050 +                                               NULL);
7051 +       if (!afs_inode_cachep) {
7052 +               printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
7053 +#if 0
7054 +               afs_put_cache(afs_cache);
7055 +#endif
7056 +               return ret;
7057 +       }
7058 +#endif
7059 +
7060 +       /* now export our filesystem to lesser mortals */
7061 +       ret = register_filesystem(&afs_fs_type);
7062 +       if (ret<0) {
7063 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7064 +               kmem_cache_destroy(afs_inode_cachep);
7065 +#endif
7066 +#if 0
7067 +               afs_put_cache(afs_cache);
7068 +#endif
7069 +               return ret;
7070 +       }
7071 +
7072 +       return 0;
7073 +} /* end afs_fs_init() */
7074 +
7075 +/*****************************************************************************/
7076 +/*
7077 + * clean up the filesystem
7078 + */
7079 +void __exit afs_fs_exit(void)
7080 +{
7081 +       /* destroy our private inode cache */
7082 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7083 +       kmem_cache_destroy(afs_inode_cachep);
7084 +#endif
7085 +
7086 +       unregister_filesystem(&afs_fs_type);
7087 +
7088 +#if 0
7089 +       if (afs_cache)
7090 +               afs_put_cache(afs_cache);
7091 +#endif
7092 +} /* end afs_fs_exit() */
7093 +
7094 +/*****************************************************************************/
7095 +/*
7096 + * check that an argument has a value
7097 + */
7098 +static int want_arg(char **_value, const char *option)
7099 +{
7100 +       if (!_value || !*_value || !**_value) {
7101 +               printk(KERN_NOTICE "kAFS: %s: argument missing\n",option);
7102 +               return 0;
7103 +       }
7104 +       return 1;
7105 +} /* end want_arg() */
7106 +
7107 +/*****************************************************************************/
7108 +/*
7109 + * check that there is a value
7110 + */
7111 +#if 0
7112 +static int want_value(char **_value, const char *option)
7113 +{
7114 +       if (!_value || !*_value || !**_value) {
7115 +               printk(KERN_NOTICE "kAFS: %s: argument incomplete\n",option);
7116 +               return 0;
7117 +       }
7118 +       return 1;
7119 +} /* end want_value() */
7120 +#endif
7121 +
7122 +/*****************************************************************************/
7123 +/*
7124 + * check that there's no subsequent value
7125 + */
7126 +static int want_no_value(char *const *_value, const char *option)
7127 +{
7128 +       if (*_value && **_value) {
7129 +               printk(KERN_NOTICE "kAFS: %s: Invalid argument: %s\n",option,*_value);
7130 +               return 0;
7131 +       }
7132 +       return 1;
7133 +} /* end want_no_value() */
7134 +
7135 +/*****************************************************************************/
7136 +/*
7137 + * extract a number from an option string value
7138 + */
7139 +#if 0
7140 +static int want_number(char **_value, const char *option, unsigned long *number,
7141 +                      unsigned long limit)
7142 +{
7143 +       char *value = *_value;
7144 +
7145 +       if (!want_value(_value,option))
7146 +               return 0;
7147 +
7148 +       *number = simple_strtoul(value,_value,0);
7149 +
7150 +       if (value==*_value) {
7151 +               printk(KERN_NOTICE "kAFS: %s: Invalid number: %s\n",option,value);
7152 +               return 0;
7153 +       }
7154 +
7155 +       if (*number>limit) {
7156 +               printk(KERN_NOTICE "kAFS: %s: numeric value %lu > %lu\n",option,*number,limit);
7157 +               return 0;
7158 +       }
7159 +
7160 +       return 1;
7161 +} /* end want_number() */
7162 +#endif
7163 +
7164 +/*****************************************************************************/
7165 +/*
7166 + * extract a separator from an option string value
7167 + */
7168 +#if 0
7169 +static int want_sep(char **_value, const char *option, char sep)
7170 +{
7171 +       if (!want_value(_value,option))
7172 +               return 0;
7173 +
7174 +       if (*(*_value)++ != sep) {
7175 +               printk(KERN_NOTICE "kAFS: %s: '%c' expected: %s\n",option,sep,*_value-1);
7176 +               return 0;
7177 +       }
7178 +
7179 +       return 1;
7180 +} /* end want_sep() */
7181 +#endif
7182 +
7183 +/*****************************************************************************/
7184 +/*
7185 + * extract an IP address from an option string value
7186 + */
7187 +#if 0
7188 +static int want_ipaddr(char **_value, const char *option, struct in_addr *addr)
7189 +{
7190 +       unsigned long number[4];
7191 +
7192 +       if (!want_value(_value,option))
7193 +               return 0;
7194 +
7195 +       if (!want_number(_value,option,&number[0],255) ||
7196 +           !want_sep(_value,option,'.') ||
7197 +           !want_number(_value,option,&number[1],255) ||
7198 +           !want_sep(_value,option,'.') ||
7199 +           !want_number(_value,option,&number[2],255) ||
7200 +           !want_sep(_value,option,'.') ||
7201 +           !want_number(_value,option,&number[3],255))
7202 +               return 0;
7203 +
7204 +       ((u8*)addr)[0] = number[0];
7205 +       ((u8*)addr)[1] = number[1];
7206 +       ((u8*)addr)[2] = number[2];
7207 +       ((u8*)addr)[3] = number[3];
7208 +
7209 +       return 1;
7210 +} /* end want_ipaddr() */
7211 +#endif
7212 +
7213 +/*****************************************************************************/
7214 +/*
7215 + * parse the mount options
7216 + * - this function has been shamelessly adapted from the ext3 fs which shamelessly adapted it from
7217 + *   the msdos fs
7218 + */
7219 +static int afs_super_parse_options(struct afs_super_info *as, char *options, char **devname)
7220 +{
7221 +       char *key, *value;
7222 +       int ret;
7223 +
7224 +       kenter("%s",options);
7225 +
7226 +       ret = 0;
7227 +       while ((key = strsep(&options,",")))
7228 +       {
7229 +               value = strchr(key,'=');
7230 +               if (value)
7231 +                       *value++ = 0;
7232 +
7233 +               printk("kAFS: KEY: %s, VAL:%s\n",key,value?:"-");
7234 +
7235 +               if (strcmp(key,"rwpath")==0) {
7236 +                       if (!want_no_value(&value,"rwpath")) return -EINVAL;
7237 +                       as->rwparent = 1;
7238 +                       continue;
7239 +               }
7240 +               else if (strcmp(key,"vol")==0) {
7241 +                       if (!want_arg(&value,"vol")) return -EINVAL;
7242 +                       *devname = value;
7243 +                       continue;
7244 +               }
7245 +
7246 +#if 0
7247 +               if (strcmp(key,"servers")==0) {
7248 +                       if (!want_arg(&value,"servers")) return -EINVAL;
7249 +
7250 +                       _debug("servers=%s",value);
7251 +
7252 +                       for (;;) {
7253 +                               struct in_addr addr;
7254 +
7255 +                               if (!want_ipaddr(&value,"servers",&addr))
7256 +                                       return -EINVAL;
7257 +
7258 +                               ret = afs_create_server(as->cell,&addr,&as->server);
7259 +                               if (ret<0) {
7260 +                                       printk("kAFS: unable to create server: %d\n",ret);
7261 +                                       return ret;
7262 +                               }
7263 +
7264 +                               if (!*value)
7265 +                                       break;
7266 +
7267 +                               if (as->server) {
7268 +                                       printk(KERN_NOTICE
7269 +                                              "kAFS: only one server can be specified\n");
7270 +                                       return -EINVAL;
7271 +                               }
7272 +
7273 +                               if (!want_sep(&value,"servers",':'))
7274 +                                       return -EINVAL;
7275 +                       }
7276 +                       continue;
7277 +               }
7278 +#endif
7279 +
7280 +               printk("kAFS: Unknown mount option: '%s'\n",key);
7281 +               ret = -EINVAL;
7282 +               goto error;
7283 +       }
7284 +
7285 +       ret = 0;
7286 +
7287 + error:
7288 +       kleave(" = %d",ret);
7289 +
7290 +       return ret;
7291 +} /* end afs_super_parse_options() */
7292 +
7293 +/*****************************************************************************/
7294 +/*
7295 + * fill in the superblock
7296 + */
7297 +static int afs_fill_super(struct super_block *sb, void *_data, int silent)
7298 +{
7299 +       struct afs_super_info *as = NULL;
7300 +       struct dentry *root = NULL;
7301 +       struct inode *inode = NULL;
7302 +       afs_fid_t fid;
7303 +       void **data = _data;
7304 +       char *options, *devname;
7305 +       int ret;
7306 +
7307 +       kenter("");
7308 +
7309 +       if (!data) {
7310 +               kleave(" = -EINVAL");
7311 +               return -EINVAL;
7312 +       }
7313 +       devname = data[0];
7314 +       options = data[1];
7315 +       if (options)
7316 +               options[PAGE_SIZE-1] = 0;
7317 +
7318 +       /* allocate a superblock info record */
7319 +       as = kmalloc(sizeof(struct afs_super_info),GFP_KERNEL);
7320 +       if (!as) {
7321 +               kleave(" = -ENOMEM");
7322 +               return -ENOMEM;
7323 +       }
7324 +
7325 +       memset(as,0,sizeof(struct afs_super_info));
7326 +
7327 +       /* parse the options */
7328 +       if (options) {
7329 +               ret = afs_super_parse_options(as,options,&devname);
7330 +               if (ret<0)
7331 +                       goto error;
7332 +               if (!devname) {
7333 +                       printk("kAFS: no volume name specified\n");
7334 +                       ret = -EINVAL;
7335 +                       goto error;
7336 +               }
7337 +       }
7338 +
7339 +       /* parse the device name */
7340 +       ret = afs_volume_lookup(afs_cache,devname,as->rwparent,&as->volume);
7341 +       if (ret<0)
7342 +               goto error;
7343 +
7344 +       /* fill in the superblock */
7345 +       sb->s_blocksize         = PAGE_CACHE_SIZE;
7346 +       sb->s_blocksize_bits    = PAGE_CACHE_SHIFT;
7347 +       sb->s_magic             = AFS_FS_MAGIC;
7348 +       sb->s_op                = &afs_super_ops;
7349 +       sb->u.generic_sbp       = as;
7350 +
7351 +       /* allocate the root inode and dentry */
7352 +       fid.vid         = as->volume->vid;
7353 +       fid.vnode       = 1;
7354 +       fid.unique      = 1;
7355 +       ret = afs_iget(sb,&fid,&inode);
7356 +       if (ret<0)
7357 +               goto error;
7358 +
7359 +       ret = -ENOMEM;
7360 +       root = d_alloc_root(inode);
7361 +       if (!root)
7362 +               goto error;
7363 +
7364 +       sb->s_root = root;
7365 +
7366 +       kleave(" = 0");
7367 +       return 0;
7368 +
7369 + error:
7370 +       if (root) dput(root);
7371 +       if (inode) iput(inode);
7372 +       if (as) {
7373 +               if (as->volume)         afs_put_volume(as->volume);
7374 +               kfree(as);
7375 +       }
7376 +       sb->u.generic_sbp = NULL;
7377 +
7378 +       kleave(" = %d",ret);
7379 +       return ret;
7380 +} /* end afs_fill_super() */
7381 +
7382 +/*****************************************************************************/
7383 +/*
7384 + * get an AFS superblock
7385 + * - TODO: don't use get_sb_nodev(), but rather call sget() directly
7386 + */
7387 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7388 +static struct super_block *afs_get_sb(struct file_system_type *fs_type,
7389 +                                     int flags,
7390 +                                     char *dev_name,
7391 +                                     void *options)
7392 +{
7393 +       struct super_block *sb;
7394 +       void *data[2] = { dev_name, options };
7395 +       int ret;
7396 +
7397 +       _enter(",,%s,%p",dev_name,options);
7398 +
7399 +       /* start the cache manager */
7400 +       ret = afscm_start();
7401 +       if (ret<0) {
7402 +               _leave(" = %d",ret);
7403 +               return ERR_PTR(ret);
7404 +       }
7405 +
7406 +       /* allocate a deviceless superblock */
7407 +       sb = get_sb_nodev(fs_type,flags,data,afs_fill_super);
7408 +       if (IS_ERR(sb)) {
7409 +               afscm_stop();
7410 +               return sb;
7411 +       }
7412 +
7413 +       _leave("");
7414 +       return sb;
7415 +} /* end afs_get_sb() */
7416 +#endif
7417 +
7418 +/*****************************************************************************/
7419 +/*
7420 + * read an AFS superblock
7421 + */
7422 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
7423 +static struct super_block *afs_read_super(struct super_block *sb, void *options, int silent)
7424 +{
7425 +       void *data[2] = { NULL, options };
7426 +       int ret;
7427 +
7428 +       kenter(",,%s",(char*)options);
7429 +
7430 +       /* start the cache manager */
7431 +       ret = afscm_start();
7432 +       if (ret<0) {
7433 +               kleave(" = NULL (%d)",ret);
7434 +               return NULL;
7435 +       }
7436 +
7437 +       /* allocate a deviceless superblock */
7438 +       ret = afs_fill_super(sb,data,silent);
7439 +       if (ret<0) {
7440 +               afscm_stop();
7441 +               kleave(" = NULL (%d)",ret);
7442 +               return NULL;
7443 +       }
7444 +
7445 +       kleave(" = %p",sb);
7446 +       return sb;
7447 +} /* end afs_read_super() */
7448 +#endif
7449 +
7450 +/*****************************************************************************/
7451 +/*
7452 + * finish the unmounting process on the superblock
7453 + */
7454 +static void afs_put_super(struct super_block *sb)
7455 +{
7456 +       struct afs_super_info *as = sb->u.generic_sbp;
7457 +
7458 +       _enter("");
7459 +
7460 +       if (as) {
7461 +               if (as->volume)         afs_put_volume(as->volume);
7462 +       }
7463 +
7464 +       /* stop the cache manager */
7465 +       afscm_stop();
7466 +
7467 +       _leave("");
7468 +} /* end afs_put_super() */
7469 +
7470 +/*****************************************************************************/
7471 +/*
7472 + * initialise an inode cache slab element prior to any use
7473 + */
7474 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7475 +static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep, unsigned long flags)
7476 +{
7477 +       afs_vnode_t *vnode = (afs_vnode_t *) _vnode;
7478 +
7479 +       if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) {
7480 +               memset(vnode,0,sizeof(*vnode));
7481 +               inode_init_once(&vnode->vfs_inode);
7482 +               init_waitqueue_head(&vnode->update_waitq);
7483 +               spin_lock_init(&vnode->lock);
7484 +               INIT_LIST_HEAD(&vnode->cb_link);
7485 +               INIT_LIST_HEAD(&vnode->cb_hash_link);
7486 +               afs_timer_init(&vnode->cb_timeout,&afs_vnode_cb_timed_out_ops);
7487 +       }
7488 +
7489 +} /* end afs_i_init_once() */
7490 +#endif
7491 +
7492 +/*****************************************************************************/
7493 +/*
7494 + * allocate an AFS inode struct from our slab cache
7495 + */
7496 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7497 +static struct inode *afs_alloc_inode(struct super_block *sb)
7498 +{
7499 +       afs_vnode_t *vnode;
7500 +
7501 +       vnode = (afs_vnode_t *) kmem_cache_alloc(afs_inode_cachep,SLAB_KERNEL);
7502 +       if (!vnode)
7503 +               return NULL;
7504 +
7505 +       memset(&vnode->fid,0,sizeof(vnode->fid));
7506 +       memset(&vnode->status,0,sizeof(vnode->status));
7507 +
7508 +       vnode->volume = NULL;
7509 +       vnode->update_cnt = 0;
7510 +       vnode->flags = 0;
7511 +
7512 +       return &vnode->vfs_inode;
7513 +} /* end afs_alloc_inode() */
7514 +#endif
7515 +
7516 +/*****************************************************************************/
7517 +/*
7518 + * put an inode
7519 + */
7520 +static void afs_put_inode(struct inode *inode)
7521 +{
7522 +       if (inode->u.generic_ip) kfree(inode->u.generic_ip);
7523 +
7524 +} /* end afs_put_inode() */
7525 +
7526 +/*****************************************************************************/
7527 +/*
7528 + * destroy an AFS inode struct
7529 + */
7530 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7531 +static void afs_destroy_inode(struct inode *inode)
7532 +{
7533 +       _enter("{%lu}",inode->i_ino);
7534 +       kmem_cache_free(afs_inode_cachep, AFS_FS_I(inode));
7535 +} /* end afs_destroy_inode() */
7536 +#endif
7537 diff -urNp linux-5240/fs/afs/super.h linux-5250/fs/afs/super.h
7538 --- linux-5240/fs/afs/super.h   1970-01-01 01:00:00.000000000 +0100
7539 +++ linux-5250/fs/afs/super.h   
7540 @@ -0,0 +1,43 @@
7541 +/* super.h: AFS filesystem internal private data
7542 + *
7543 + * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
7544 + *
7545 + * This software may be freely redistributed under the terms of the
7546 + * GNU General Public License.
7547 + *
7548 + * You should have received a copy of the GNU General Public License
7549 + * along with this program; if not, write to the Free Software
7550 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
7551 + *
7552 + * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
7553 + *          David Howells <dhowells@redhat.com>
7554 + *
7555 + */
7556 +
7557 +#ifndef _LINUX_AFS_SUPER_H
7558 +#define _LINUX_AFS_SUPER_H
7559 +
7560 +#include <linux/fs.h>
7561 +#include "server.h"
7562 +
7563 +#ifdef __KERNEL__
7564 +
7565 +/*****************************************************************************/
7566 +/*
7567 + * AFS superblock private data
7568 + * - there's one superblock per volume
7569 + */
7570 +struct afs_super_info
7571 +{
7572 +       afs_volume_t            *volume;        /* volume record */
7573 +       char                    rwparent;       /* T if parent is R/W AFS volume */
7574 +};
7575 +
7576 +static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
7577 +{
7578 +       return sb->u.generic_sbp;
7579 +}
7580 +
7581 +#endif /* __KERNEL__ */
7582 +
7583 +#endif /* _LINUX_AFS_SUPER_H */
7584 diff -urNp linux-5240/fs/afs/transport.h linux-5250/fs/afs/transport.h
7585 --- linux-5240/fs/afs/transport.h       1970-01-01 01:00:00.000000000 +0100
7586 +++ linux-5250/fs/afs/transport.h       
7587 @@ -0,0 +1,21 @@
7588 +/* transport.h: AFS transport management
7589 + *
7590 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
7591 + * Written by David Howells (dhowells@redhat.com)
7592 + *
7593 + * This program is free software; you can redistribute it and/or
7594 + * modify it under the terms of the GNU General Public License
7595 + * as published by the Free Software Foundation; either version
7596 + * 2 of the License, or (at your option) any later version.
7597 + */
7598 +
7599 +#ifndef _LINUX_AFS_TRANSPORT_H
7600 +#define _LINUX_AFS_TRANSPORT_H
7601 +
7602 +#include "types.h"
7603 +#include <rxrpc/transport.h>
7604 +
7605 +/* the cache manager transport endpoint */
7606 +extern struct rxrpc_transport *afs_transport;
7607 +
7608 +#endif /* _LINUX_AFS_TRANSPORT_H */
7609 diff -urNp linux-5240/fs/afs/types.h linux-5250/fs/afs/types.h
7610 --- linux-5240/fs/afs/types.h   1970-01-01 01:00:00.000000000 +0100
7611 +++ linux-5250/fs/afs/types.h   
7612 @@ -0,0 +1,141 @@
7613 +/* types.h: AFS types
7614 + *
7615 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
7616 + * Written by David Howells (dhowells@redhat.com)
7617 + *
7618 + * This program is free software; you can redistribute it and/or
7619 + * modify it under the terms of the GNU General Public License
7620 + * as published by the Free Software Foundation; either version
7621 + * 2 of the License, or (at your option) any later version.
7622 + */
7623 +
7624 +#ifndef _LINUX_AFS_TYPES_H
7625 +#define _LINUX_AFS_TYPES_H
7626 +
7627 +#ifdef __KERNEL__
7628 +#include <rxrpc/types.h>
7629 +#endif /* __KERNEL__ */
7630 +
7631 +typedef unsigned                       afs_volid_t;
7632 +typedef unsigned                       afs_vnodeid_t;
7633 +typedef unsigned long long             afs_dataversion_t;
7634 +
7635 +typedef struct afs_async_op            afs_async_op_t;
7636 +typedef struct afs_cache               afs_cache_t;
7637 +typedef struct afs_cache_volindex      afs_cache_volindex_t;
7638 +typedef struct afs_callback            afs_callback_t;
7639 +typedef struct afs_cell                        afs_cell_t;
7640 +typedef struct afs_fid                 afs_fid_t;
7641 +typedef struct afs_file_status         afs_file_status_t;
7642 +typedef struct afs_server              afs_server_t;
7643 +typedef struct afs_timer               afs_timer_t;
7644 +typedef struct afs_vlocation           afs_vlocation_t;
7645 +typedef struct afs_vnode               afs_vnode_t;
7646 +typedef struct afs_volsync             afs_volsync_t;
7647 +typedef struct afs_volume              afs_volume_t;
7648 +typedef struct afs_volume_info         afs_volume_info_t;
7649 +
7650 +typedef struct afsvl_dbentry           afsvl_dbentry_t;
7651 +
7652 +typedef enum {
7653 +       AFSVL_RWVOL,                    /* read/write volume */
7654 +       AFSVL_ROVOL,                    /* read-only volume */
7655 +       AFSVL_BACKVOL,                  /* backup volume */
7656 +} afs_voltype_t;
7657 +
7658 +extern const char *afs_voltypes[];
7659 +
7660 +typedef enum {
7661 +       AFS_FTYPE_INVALID       = 0,
7662 +       AFS_FTYPE_FILE          = 1,
7663 +       AFS_FTYPE_DIR           = 2,
7664 +       AFS_FTYPE_SYMLINK       = 3,
7665 +} afs_file_type_t;
7666 +
7667 +#ifdef __KERNEL__
7668 +
7669 +/*****************************************************************************/
7670 +/*
7671 + * AFS file identifier
7672 + */
7673 +struct afs_fid
7674 +{
7675 +       afs_volid_t     vid;            /* volume ID */
7676 +       afs_vnodeid_t   vnode;          /* file index within volume */
7677 +       unsigned        unique;         /* unique ID number (file index version) */
7678 +};
7679 +
7680 +/*****************************************************************************/
7681 +/*
7682 + * AFS callback notification
7683 + */
7684 +typedef enum {
7685 +       AFSCM_CB_UNTYPED        = 0,    /* no type set on CB break */
7686 +       AFSCM_CB_EXCLUSIVE      = 1,    /* CB exclusive to CM [not implemented] */
7687 +       AFSCM_CB_SHARED         = 2,    /* CB shared by other CMs */
7688 +       AFSCM_CB_DROPPED        = 3,    /* CB promise cancelled by file server */
7689 +} afs_callback_type_t;
7690 +
7691 +struct afs_callback
7692 +{
7693 +       afs_server_t            *server;        /* server that made the promise */
7694 +       afs_fid_t               fid;            /* file identifier */
7695 +       unsigned                version;        /* callback version */
7696 +       unsigned                expiry;         /* time at which expires */
7697 +       afs_callback_type_t     type;           /* type of callback */
7698 +};
7699 +
7700 +#define AFSCBMAX 50
7701 +
7702 +/*****************************************************************************/
7703 +/*
7704 + * AFS volume information
7705 + */
7706 +struct afs_volume_info
7707 +{
7708 +       afs_volid_t             vid;            /* volume ID */
7709 +       afs_voltype_t           type;           /* type of this volume */
7710 +       afs_volid_t             type_vids[5];   /* volume IDs for possible types for this vol */
7711 +       
7712 +       /* list of fileservers serving this volume */
7713 +       size_t                  nservers;       /* number of entries used in servers[] */
7714 +       struct {
7715 +               struct in_addr  addr;           /* fileserver address */
7716 +       } servers[8];
7717 +};
7718 +
7719 +/*****************************************************************************/
7720 +/*
7721 + * AFS file status information
7722 + */
7723 +struct afs_file_status
7724 +{
7725 +       unsigned                if_version;     /* interface version */
7726 +#define AFS_FSTATUS_VERSION    1
7727 +
7728 +       afs_file_type_t         type;           /* file type */
7729 +       unsigned                nlink;          /* link count */
7730 +       size_t                  size;           /* file size */
7731 +       afs_dataversion_t       version;        /* current data version */
7732 +       unsigned                author;         /* author ID */
7733 +       unsigned                owner;          /* owner ID */
7734 +       unsigned                caller_access;  /* access rights for authenticated caller */
7735 +       unsigned                anon_access;    /* access rights for unauthenticated caller */
7736 +       umode_t                 mode;           /* UNIX mode */
7737 +       afs_fid_t               parent;         /* parent file ID */
7738 +       time_t                  mtime_client;   /* last time client changed data */
7739 +       time_t                  mtime_server;   /* last time server changed data */
7740 +};
7741 +
7742 +/*****************************************************************************/
7743 +/*
7744 + * AFS volume synchronisation information
7745 + */
7746 +struct afs_volsync
7747 +{
7748 +       time_t                  creation;       /* volume creation time */
7749 +};
7750 +
7751 +#endif /* __KERNEL__ */
7752 +
7753 +#endif /* _LINUX_AFS_TYPES_H */
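A minimal user-space sketch (not part of the patch itself) of how the three fields of struct afs_fid above pin down one file: the volume ID selects the volume, the vnode number selects a slot within it, and the uniquifier distinguishes successive files that have reused the same slot. The struct and helper below exist only for this illustration.

#include <stdio.h>

struct example_afs_fid {
	unsigned vid;		/* volume ID */
	unsigned vnode;		/* file index within volume */
	unsigned unique;	/* unique ID number (file index version) */
};

static int example_fid_equal(const struct example_afs_fid *a,
			     const struct example_afs_fid *b)
{
	/* all three fields must match for the fids to name the same file */
	return a->vid == b->vid &&
	       a->vnode == b->vnode &&
	       a->unique == b->unique;
}

int main(void)
{
	struct example_afs_fid x = { .vid = 0x20000001, .vnode = 2, .unique = 1 };
	struct example_afs_fid y = { .vid = 0x20000001, .vnode = 2, .unique = 2 };

	/* same volume and vnode slot, but the slot was reused: different file */
	printf("same file: %d\n", example_fid_equal(&x, &y));
	return 0;
}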
7754 diff -urNp linux-5240/fs/afs/vlclient.c linux-5250/fs/afs/vlclient.c
7755 --- linux-5240/fs/afs/vlclient.c        1970-01-01 01:00:00.000000000 +0100
7756 +++ linux-5250/fs/afs/vlclient.c        
7757 @@ -0,0 +1,661 @@
7758 +/* vlclient.c: AFS Volume Location Service client
7759 + *
7760 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
7761 + * Written by David Howells (dhowells@redhat.com)
7762 + *
7763 + * This program is free software; you can redistribute it and/or
7764 + * modify it under the terms of the GNU General Public License
7765 + * as published by the Free Software Foundation; either version
7766 + * 2 of the License, or (at your option) any later version.
7767 + */
7768 +
7769 +#include <linux/init.h>
7770 +#include <linux/sched.h>
7771 +#include <rxrpc/rxrpc.h>
7772 +#include <rxrpc/transport.h>
7773 +#include <rxrpc/connection.h>
7774 +#include <rxrpc/call.h>
7775 +#include "server.h"
7776 +#include "vlclient.h"
7777 +#include "kafsasyncd.h"
7778 +#include "kafstimod.h"
7779 +#include "errors.h"
7780 +#include "internal.h"
7781 +
7782 +#define VLGETENTRYBYID         503     /* AFS Get Cache Entry By ID operation ID */
7783 +#define VLGETENTRYBYNAME       504     /* AFS Get Cache Entry By Name operation ID */
7784 +#define VLPROBE                        514     /* AFS Probe Volume Location Service operation ID */
7785 +
7786 +static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call);
7787 +static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call);
7788 +
7789 +/*****************************************************************************/
7790 +/*
7791 + * map afs VL abort codes to/from Linux error codes
7792 + * - called with call->lock held
7793 + */
7794 +static void afs_rxvl_aemap(struct rxrpc_call *call)
7795 +{
7796 +       int err;
7797 +
7798 +       _enter("{%u,%u,%d}",call->app_err_state,call->app_abort_code,call->app_errno);
7799 +
7800 +       switch (call->app_err_state) {
7801 +       case RXRPC_ESTATE_LOCAL_ABORT:
7802 +               call->app_abort_code = -call->app_errno;
7803 +               return;
7804 +
7805 +       case RXRPC_ESTATE_PEER_ABORT:
7806 +               switch (call->app_abort_code) {
7807 +               case AFSVL_IDEXIST:             err = -EEXIST;          break;
7808 +               case AFSVL_IO:                  err = -EREMOTEIO;       break;
7809 +               case AFSVL_NAMEEXIST:           err = -EEXIST;          break;
7810 +               case AFSVL_CREATEFAIL:          err = -EREMOTEIO;       break;
7811 +               case AFSVL_NOENT:               err = -ENOMEDIUM;       break;
7812 +               case AFSVL_EMPTY:               err = -ENOMEDIUM;       break;
7813 +               case AFSVL_ENTDELETED:          err = -ENOMEDIUM;       break;
7814 +               case AFSVL_BADNAME:             err = -EINVAL;          break;
7815 +               case AFSVL_BADINDEX:            err = -EINVAL;          break;
7816 +               case AFSVL_BADVOLTYPE:          err = -EINVAL;          break;
7817 +               case AFSVL_BADSERVER:           err = -EINVAL;          break;
7818 +               case AFSVL_BADPARTITION:        err = -EINVAL;          break;
7819 +               case AFSVL_REPSFULL:            err = -EFBIG;           break;
7820 +               case AFSVL_NOREPSERVER:         err = -ENOENT;          break;
7821 +               case AFSVL_DUPREPSERVER:        err = -EEXIST;          break;
7822 +               case AFSVL_RWNOTFOUND:          err = -ENOENT;          break;
7823 +               case AFSVL_BADREFCOUNT:         err = -EINVAL;          break;
7824 +               case AFSVL_SIZEEXCEEDED:        err = -EINVAL;          break;
7825 +               case AFSVL_BADENTRY:            err = -EINVAL;          break;
7826 +               case AFSVL_BADVOLIDBUMP:        err = -EINVAL;          break;
7827 +               case AFSVL_IDALREADYHASHED:     err = -EINVAL;          break;
7828 +               case AFSVL_ENTRYLOCKED:         err = -EBUSY;           break;
7829 +               case AFSVL_BADVOLOPER:          err = -EBADRQC;         break;
7830 +               case AFSVL_BADRELLOCKTYPE:      err = -EINVAL;          break;
7831 +               case AFSVL_RERELEASE:           err = -EREMOTEIO;       break;
7832 +               case AFSVL_BADSERVERFLAG:       err = -EINVAL;          break;
7833 +               case AFSVL_PERM:                err = -EACCES;          break;
7834 +               case AFSVL_NOMEM:               err = -EREMOTEIO;       break;
7835 +               default:
7836 +                       err = afs_abort_to_error(call->app_abort_code);
7837 +                       break;
7838 +               }
7839 +               call->app_errno = err;
7840 +               return;
7841 +
7842 +       default:
7843 +               return;
7844 +       }
7845 +} /* end afs_rxvl_aemap() */
7846 +
7847 +/*****************************************************************************/
7848 +/*
7849 + * probe a volume location server to see if it is still alive
7850 + */
7851 +int afs_rxvl_probe(afs_server_t *server, int alloc_flags)
7852 +{
7853 +       DECLARE_WAITQUEUE(myself,current);
7854 +
7855 +       struct rxrpc_connection *conn;
7856 +       struct rxrpc_call *call;
7857 +       struct iovec piov[1];
7858 +       size_t sent;
7859 +       int ret;
7860 +       u32 param[1];
7861 +
7862 +       /* get hold of the vlserver connection */
7863 +       ret = afs_server_get_vlconn(server,&conn);
7864 +       if (ret<0)
7865 +               goto out;
7866 +
7867 +       /* create a call through that connection */
7868 +       ret = rxrpc_create_call(conn,NULL,NULL,afs_rxvl_aemap,&call);
7869 +       if (ret<0) {
7870 +               printk("kAFS: Unable to create call: %d\n",ret);
7871 +               goto out_put_conn;
7872 +       }
7873 +       call->app_opcode = VLPROBE;
7874 +
7875 +       /* we want to get event notifications from the call */
7876 +       add_wait_queue(&call->waitq,&myself);
7877 +
7878 +       /* marshall the parameters */
7879 +       param[0] = htonl(VLPROBE);
7880 +       piov[0].iov_len = sizeof(param);
7881 +       piov[0].iov_base = param;
7882 +
7883 +       /* send the parameters to the server */
7884 +       ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,alloc_flags,0,&sent);
7885 +       if (ret<0)
7886 +               goto abort;
7887 +
7888 +       /* wait for the reply to completely arrive */
7889 +       for (;;) {
7890 +               set_current_state(TASK_INTERRUPTIBLE);
7891 +               if (call->app_call_state!=RXRPC_CSTATE_CLNT_RCV_REPLY ||
7892 +                   signal_pending(current))
7893 +                       break;
7894 +               schedule();
7895 +       }
7896 +       set_current_state(TASK_RUNNING);
7897 +
7898 +       ret = -EINTR;
7899 +       if (signal_pending(current))
7900 +               goto abort;
7901 +
7902 +       switch (call->app_call_state) {
7903 +       case RXRPC_CSTATE_ERROR:
7904 +               ret = call->app_errno;
7905 +               goto out_unwait;
7906 +
7907 +       case RXRPC_CSTATE_CLNT_GOT_REPLY:
7908 +               ret = 0;
7909 +               goto out_unwait;
7910 +
7911 +       default:
7912 +               BUG();
7913 +       }
7914 +
7915 + abort:
7916 +       set_current_state(TASK_UNINTERRUPTIBLE);
7917 +       rxrpc_call_abort(call,ret);
7918 +       schedule();
7919 +
7920 + out_unwait:
7921 +       set_current_state(TASK_RUNNING);
7922 +       remove_wait_queue(&call->waitq,&myself);
7923 +       rxrpc_put_call(call);
7924 + out_put_conn:
7925 +       rxrpc_put_connection(conn);
7926 + out:
7927 +       return ret;
7928 +
7929 +} /* end afs_rxvl_probe() */
7930 +
7931 +/*****************************************************************************/
7932 +/*
7933 + * look up a volume location database entry by name
7934 + */
7935 +int afs_rxvl_get_entry_by_name(afs_server_t *server, const char *volname,
7936 +                              struct afs_cache_volume *entry)
7937 +{
7938 +       DECLARE_WAITQUEUE(myself,current);
7939 +
7940 +       struct rxrpc_connection *conn;
7941 +       struct rxrpc_call *call;
7942 +       struct iovec piov[3];
7943 +       unsigned tmp;
7944 +       size_t sent;
7945 +       int ret, loop;
7946 +       u32 *bp, param[2], zero;
7947 +
7948 +       _enter(",%s,",volname);
7949 +
7950 +       memset(entry,0,sizeof(*entry));
7951 +
7952 +       /* get hold of the vlserver connection */
7953 +       ret = afs_server_get_vlconn(server,&conn);
7954 +       if (ret<0)
7955 +               goto out;
7956 +
7957 +       /* create a call through that connection */
7958 +       ret = rxrpc_create_call(conn,NULL,NULL,afs_rxvl_aemap,&call);
7959 +       if (ret<0) {
7960 +               printk("kAFS: Unable to create call: %d\n",ret);
7961 +               goto out_put_conn;
7962 +       }
7963 +       call->app_opcode = VLGETENTRYBYNAME;
7964 +
7965 +       /* we want to get event notifications from the call */
7966 +       add_wait_queue(&call->waitq,&myself);
7967 +
7968 +       /* marshall the parameters */
7969 +       piov[1].iov_len = strlen(volname);
7970 +       piov[1].iov_base = (char*)volname;
7971 +
7972 +       zero = 0;
7973 +       piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
7974 +       piov[2].iov_base = &zero;
7975 +
7976 +       param[0] = htonl(VLGETENTRYBYNAME);
7977 +       param[1] = htonl(piov[1].iov_len);
7978 +
7979 +       piov[0].iov_len = sizeof(param);
7980 +       piov[0].iov_base = param;
7981 +
7982 +       /* send the parameters to the server */
7983 +       ret = rxrpc_call_write_data(call,3,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
7984 +       if (ret<0)
7985 +               goto abort;
7986 +
7987 +       /* wait for the reply to completely arrive */
7988 +       bp = rxrpc_call_alloc_scratch(call,384);
7989 +
7990 +       ret = rxrpc_call_read_data(call,bp,384,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
7991 +       if (ret<0) {
7992 +               if (ret==-ECONNABORTED) {
7993 +                       ret = call->app_errno;
7994 +                       goto out_unwait;
7995 +               }
7996 +               goto abort;
7997 +       }
7998 +
7999 +       /* unmarshall the reply */
8000 +       for (loop=0; loop<64; loop++)
8001 +               entry->name[loop] = ntohl(*bp++);
8002 +       bp++; /* final NUL */
8003 +
8004 +       bp++; /* type */
8005 +       entry->nservers = ntohl(*bp++);
8006 +
8007 +       for (loop=0; loop<8; loop++)
8008 +               entry->servers[loop].s_addr = *bp++;
8009 +
8010 +       bp += 8; /* partition IDs */
8011 +
8012 +       for (loop=0; loop<8; loop++) {
8013 +               tmp = ntohl(*bp++);
8014 +               if (tmp & AFS_VLSF_RWVOL  ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RW;
8015 +               if (tmp & AFS_VLSF_ROVOL  ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RO;
8016 +               if (tmp & AFS_VLSF_BACKVOL) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_BAK;
8017 +       }
8018 +
8019 +       entry->vid[0] = ntohl(*bp++);
8020 +       entry->vid[1] = ntohl(*bp++);
8021 +       entry->vid[2] = ntohl(*bp++);
8022 +
8023 +       bp++; /* clone ID */
8024 +
8025 +       tmp = ntohl(*bp++); /* flags */
8026 +       if (tmp & AFS_VLF_RWEXISTS  ) entry->vidmask |= AFS_CACHE_VOL_STM_RW;
8027 +       if (tmp & AFS_VLF_ROEXISTS  ) entry->vidmask |= AFS_CACHE_VOL_STM_RO;
8028 +       if (tmp & AFS_VLF_BACKEXISTS) entry->vidmask |= AFS_CACHE_VOL_STM_BAK;
8029 +
8030 +       ret = -ENOMEDIUM;
8031 +       if (!entry->vidmask)
8032 +               goto abort;
8033 +
8034 +       /* success */
8035 +       entry->ctime = xtime.tv_sec;
8036 +       ret = 0;
8037 +
8038 + out_unwait:
8039 +       set_current_state(TASK_RUNNING);
8040 +       remove_wait_queue(&call->waitq,&myself);
8041 +       rxrpc_put_call(call);
8042 + out_put_conn:
8043 +       rxrpc_put_connection(conn);
8044 + out:
8045 +       _leave(" = %d",ret);
8046 +       return ret;
8047 +
8048 + abort:
8049 +       set_current_state(TASK_UNINTERRUPTIBLE);
8050 +       rxrpc_call_abort(call,ret);
8051 +       schedule();
8052 +       goto out_unwait;
8053 +} /* end afs_rxvl_get_entry_by_name() */
8054 +
8055 +/*****************************************************************************/
8056 +/*
8057 + * look up a volume location database entry by ID
8058 + */
8059 +int afs_rxvl_get_entry_by_id(afs_server_t *server,
8060 +                            afs_volid_t volid,
8061 +                            afs_voltype_t voltype,
8062 +                            struct afs_cache_volume *entry)
8063 +{
8064 +       DECLARE_WAITQUEUE(myself,current);
8065 +
8066 +       struct rxrpc_connection *conn;
8067 +       struct rxrpc_call *call;
8068 +       struct iovec piov[1];
8069 +       unsigned tmp;
8070 +       size_t sent;
8071 +       int ret, loop;
8072 +       u32 *bp, param[3];
8073 +
8074 +       _enter(",%x,%d,",volid,voltype);
8075 +
8076 +       memset(entry,0,sizeof(*entry));
8077 +
8078 +       /* get hold of the vlserver connection */
8079 +       ret = afs_server_get_vlconn(server,&conn);
8080 +       if (ret<0)
8081 +               goto out;
8082 +
8083 +       /* create a call through that connection */
8084 +       ret = rxrpc_create_call(conn,NULL,NULL,afs_rxvl_aemap,&call);
8085 +       if (ret<0) {
8086 +               printk("kAFS: Unable to create call: %d\n",ret);
8087 +               goto out_put_conn;
8088 +       }
8089 +       call->app_opcode = VLGETENTRYBYID;
8090 +
8091 +       /* we want to get event notifications from the call */
8092 +       add_wait_queue(&call->waitq,&myself);
8093 +
8094 +       /* marshall the parameters */
8095 +       param[0] = htonl(VLGETENTRYBYID);
8096 +       param[1] = htonl(volid);
8097 +       param[2] = htonl(voltype);
8098 +
8099 +       piov[0].iov_len = sizeof(param);
8100 +       piov[0].iov_base = param;
8101 +
8102 +       /* send the parameters to the server */
8103 +       ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
8104 +       if (ret<0)
8105 +               goto abort;
8106 +
8107 +       /* wait for the reply to completely arrive */
8108 +       bp = rxrpc_call_alloc_scratch(call,384);
8109 +
8110 +       ret = rxrpc_call_read_data(call,bp,384,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
8111 +       if (ret<0) {
8112 +               if (ret==-ECONNABORTED) {
8113 +                       ret = call->app_errno;
8114 +                       goto out_unwait;
8115 +               }
8116 +               goto abort;
8117 +       }
8118 +
8119 +       /* unmarshall the reply */
8120 +       for (loop=0; loop<64; loop++)
8121 +               entry->name[loop] = ntohl(*bp++);
8122 +       bp++; /* final NUL */
8123 +
8124 +       bp++; /* type */
8125 +       entry->nservers = ntohl(*bp++);
8126 +
8127 +       for (loop=0; loop<8; loop++)
8128 +               entry->servers[loop].s_addr = *bp++;
8129 +
8130 +       bp += 8; /* partition IDs */
8131 +
8132 +       for (loop=0; loop<8; loop++) {
8133 +               tmp = ntohl(*bp++);
8134 +               if (tmp & AFS_VLSF_RWVOL  ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RW;
8135 +               if (tmp & AFS_VLSF_ROVOL  ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RO;
8136 +               if (tmp & AFS_VLSF_BACKVOL) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_BAK;
8137 +       }
8138 +
8139 +       entry->vid[0] = ntohl(*bp++);
8140 +       entry->vid[1] = ntohl(*bp++);
8141 +       entry->vid[2] = ntohl(*bp++);
8142 +
8143 +       bp++; /* clone ID */
8144 +
8145 +       tmp = ntohl(*bp++); /* flags */
8146 +       if (tmp & AFS_VLF_RWEXISTS  ) entry->vidmask |= AFS_CACHE_VOL_STM_RW;
8147 +       if (tmp & AFS_VLF_ROEXISTS  ) entry->vidmask |= AFS_CACHE_VOL_STM_RO;
8148 +       if (tmp & AFS_VLF_BACKEXISTS) entry->vidmask |= AFS_CACHE_VOL_STM_BAK;
8149 +
8150 +       ret = -ENOMEDIUM;
8151 +       if (!entry->vidmask)
8152 +               goto abort;
8153 +
8154 +#if 0 /* TODO: remove */
8155 +       entry->nservers = 3;
8156 +       entry->servers[0].s_addr = htonl(0xac101249);
8157 +       entry->servers[1].s_addr = htonl(0xac101243);
8158 +       entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
8159 +
8160 +       entry->srvtmask[0] = AFS_CACHE_VOL_STM_RO;
8161 +       entry->srvtmask[1] = AFS_CACHE_VOL_STM_RO;
8162 +       entry->srvtmask[2] = AFS_CACHE_VOL_STM_RO | AFS_CACHE_VOL_STM_RW;
8163 +#endif
8164 +
8165 +       /* success */
8166 +       entry->ctime = xtime.tv_sec;
8167 +       ret = 0;
8168 +
8169 + out_unwait:
8170 +       set_current_state(TASK_RUNNING);
8171 +       remove_wait_queue(&call->waitq,&myself);
8172 +       rxrpc_put_call(call);
8173 + out_put_conn:
8174 +       rxrpc_put_connection(conn);
8175 + out:
8176 +       _leave(" = %d",ret);
8177 +       return ret;
8178 +
8179 + abort:
8180 +       set_current_state(TASK_UNINTERRUPTIBLE);
8181 +       rxrpc_call_abort(call,ret);
8182 +       schedule();
8183 +       goto out_unwait;
8184 +} /* end afs_rxvl_get_entry_by_id() */
8185 +
8186 +/*****************************************************************************/
8187 +/*
8188 + * look up a volume location database entry by ID asynchronously
8189 + */
8190 +int afs_rxvl_get_entry_by_id_async(afs_async_op_t *op,
8191 +                                  afs_volid_t volid,
8192 +                                  afs_voltype_t voltype)
8193 +{
8194 +       struct rxrpc_connection *conn;
8195 +       struct rxrpc_call *call;
8196 +       struct iovec piov[1];
8197 +       size_t sent;
8198 +       int ret;
8199 +       u32 param[3];
8200 +
8201 +       _enter(",%x,%d,",volid,voltype);
8202 +
8203 +       /* get hold of the vlserver connection */
8204 +       ret = afs_server_get_vlconn(op->server,&conn);
8205 +       if (ret<0) {
8206 +               _leave(" = %d",ret);
8207 +               return ret;
8208 +       }
8209 +
8210 +       /* create a call through that connection */
8211 +       ret = rxrpc_create_call(conn,
8212 +                               afs_rxvl_get_entry_by_id_attn,
8213 +                               afs_rxvl_get_entry_by_id_error,
8214 +                               afs_rxvl_aemap,
8215 +                               &op->call);
8216 +       rxrpc_put_connection(conn);
8217 +
8218 +       if (ret<0) {
8219 +               printk("kAFS: Unable to create call: %d\n",ret);
8220 +               _leave(" = %d",ret);
8221 +               return ret;
8222 +       }
8223 +
8224 +       op->call->app_opcode = VLGETENTRYBYID;
8225 +       op->call->app_user = op;
8226 +
8227 +       call = op->call;
8228 +       rxrpc_get_call(call);
8229 +
8230 +       /* send event notifications from the call to kafsasyncd */
8231 +       afs_kafsasyncd_begin_op(op);
8232 +
8233 +       /* marshall the parameters */
8234 +       param[0] = htonl(VLGETENTRYBYID);
8235 +       param[1] = htonl(volid);
8236 +       param[2] = htonl(voltype);
8237 +
8238 +       piov[0].iov_len = sizeof(param);
8239 +       piov[0].iov_base = param;
8240 +
8241 +       /* allocate result read buffer in scratch space */
8242 +       call->app_scr_ptr = rxrpc_call_alloc_scratch(op->call,384);
8243 +
8244 +       /* send the parameters to the server */
8245 +       ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
8246 +       if (ret<0) {
8247 +               rxrpc_call_abort(call,ret); /* handle from kafsasyncd */
8248 +               ret = 0;
8249 +               goto out;
8250 +       }
8251 +
8252 +       /* wait for the reply to completely arrive */
8253 +       ret = rxrpc_call_read_data(call,call->app_scr_ptr,384,0);
8254 +       switch (ret) {
8255 +       case 0:
8256 +       case -EAGAIN:
8257 +       case -ECONNABORTED:
8258 +               ret = 0;
8259 +               break;  /* all handled by kafsasyncd */
8260 +
8261 +       default:
8262 +               rxrpc_call_abort(call,ret); /* force kafsasyncd to handle it */
8263 +               ret = 0;
8264 +               break;
8265 +       }
8266 +
8267 + out:
8268 +       rxrpc_put_call(call);
8269 +       _leave(" = %d",ret);
8270 +       return ret;
8271 +
8272 +} /* end afs_rxvl_get_entry_by_id_async() */
8273 +
8274 +/*****************************************************************************/
8275 +/*
8276 + * attend to the asynchronous get VLDB entry by ID
8277 + */
8278 +int afs_rxvl_get_entry_by_id_async2(afs_async_op_t *op,
8279 +                                   struct afs_cache_volume *entry)
8280 +{
8281 +       unsigned *bp, tmp;
8282 +       int loop, ret;
8283 +
8284 +       _enter("{op=%p cst=%u}",op,op->call->app_call_state);
8285 +
8286 +       memset(entry,0,sizeof(*entry));
8287 +
8288 +       if (op->call->app_call_state==RXRPC_CSTATE_COMPLETE) {
8289 +               /* operation finished */
8290 +               afs_kafsasyncd_terminate_op(op);
8291 +
8292 +               bp = op->call->app_scr_ptr;
8293 +
8294 +               /* unmarshall the reply */
8295 +               for (loop=0; loop<64; loop++)
8296 +                       entry->name[loop] = ntohl(*bp++);
8297 +               bp++; /* final NUL */
8298 +
8299 +               bp++; /* type */
8300 +               entry->nservers = ntohl(*bp++);
8301 +
8302 +               for (loop=0; loop<8; loop++)
8303 +                       entry->servers[loop].s_addr = *bp++;
8304 +
8305 +               bp += 8; /* partition IDs */
8306 +
8307 +               for (loop=0; loop<8; loop++) {
8308 +                       tmp = ntohl(*bp++);
8309 +                       if (tmp & AFS_VLSF_RWVOL  ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RW;
8310 +                       if (tmp & AFS_VLSF_ROVOL  ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RO;
8311 +                       if (tmp & AFS_VLSF_BACKVOL) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_BAK;
8312 +               }
8313 +
8314 +               entry->vid[0] = ntohl(*bp++);
8315 +               entry->vid[1] = ntohl(*bp++);
8316 +               entry->vid[2] = ntohl(*bp++);
8317 +
8318 +               bp++; /* clone ID */
8319 +
8320 +               tmp = ntohl(*bp++); /* flags */
8321 +               if (tmp & AFS_VLF_RWEXISTS  ) entry->vidmask |= AFS_CACHE_VOL_STM_RW;
8322 +               if (tmp & AFS_VLF_ROEXISTS  ) entry->vidmask |= AFS_CACHE_VOL_STM_RO;
8323 +               if (tmp & AFS_VLF_BACKEXISTS) entry->vidmask |= AFS_CACHE_VOL_STM_BAK;
8324 +
8325 +               ret = -ENOMEDIUM;
8326 +               if (!entry->vidmask) {
8327 +                       rxrpc_call_abort(op->call,ret);
8328 +                       goto done;
8329 +               }
8330 +
8331 +#if 0 /* TODO: remove */
8332 +               entry->nservers = 3;
8333 +               entry->servers[0].s_addr = htonl(0xac101249);
8334 +               entry->servers[1].s_addr = htonl(0xac101243);
8335 +               entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
8336 +
8337 +               entry->srvtmask[0] = AFS_CACHE_VOL_STM_RO;
8338 +               entry->srvtmask[1] = AFS_CACHE_VOL_STM_RO;
8339 +               entry->srvtmask[2] = AFS_CACHE_VOL_STM_RO | AFS_CACHE_VOL_STM_RW;
8340 +#endif
8341 +
8342 +               /* success */
8343 +               entry->ctime = xtime.tv_sec;
8344 +               ret = 0;
8345 +               goto done;
8346 +       }
8347 +
8348 +       if (op->call->app_call_state==RXRPC_CSTATE_ERROR) {
8349 +               /* operation error */
8350 +               ret = op->call->app_errno;
8351 +               goto done;
8352 +       }
8353 +
8354 +       _leave(" = -EAGAIN");
8355 +       return -EAGAIN;
8356 +
8357 + done:
8358 +       rxrpc_put_call(op->call);
8359 +       op->call = NULL;
8360 +       _leave(" = %d",ret);
8361 +       return ret;
8362 +} /* end afs_rxvl_get_entry_by_id_async2() */
8363 +
8364 +/*****************************************************************************/
8365 +/*
8366 + * handle attention events on an async get-entry-by-ID op
8367 + * - called from krxiod
8368 + */
8369 +static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call)
8370 +{
8371 +       afs_async_op_t *op = call->app_user;
8372 +
8373 +       _enter("{op=%p cst=%u}",op,call->app_call_state);
8374 +
8375 +       switch (call->app_call_state) {
8376 +       case RXRPC_CSTATE_COMPLETE:
8377 +               afs_kafsasyncd_attend_op(op);
8378 +               break;
8379 +       case RXRPC_CSTATE_CLNT_RCV_REPLY:
8380 +               if (call->app_async_read)
8381 +                       break;
8382 +       case RXRPC_CSTATE_CLNT_GOT_REPLY:
8383 +               if (call->app_read_count==0)
8384 +                       break;
8385 +               printk("kAFS: Reply bigger than expected {cst=%u asyn=%d mark=%d rdy=%u pr=%u%s}",
8386 +                      call->app_call_state,
8387 +                      call->app_async_read,
8388 +                      call->app_mark,
8389 +                      call->app_ready_qty,
8390 +                      call->pkt_rcv_count,
8391 +                      call->app_last_rcv ? " last" : "");
8392 +
8393 +               rxrpc_call_abort(call,-EBADMSG);
8394 +               break;
8395 +       default:
8396 +               BUG();
8397 +       }
8398 +
8399 +       _leave("");
8400 +
8401 +} /* end afs_rxvl_get_entry_by_id_attn() */
8402 +
8403 +/*****************************************************************************/
8404 +/*
8405 + * handle error events on an async get-entry-by-ID op
8406 + * - called from krxiod
8407 + */
8408 +static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call)
8409 +{
8410 +       afs_async_op_t *op = call->app_user;
8411 +
8412 +       _enter("{op=%p cst=%u}",op,call->app_call_state);
8413 +
8414 +       afs_kafsasyncd_attend_op(op);
8415 +
8416 +       _leave("");
8417 +
8418 +} /* end afs_rxvl_get_entry_by_id_error() */
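The marshalling in afs_rxvl_get_entry_by_name() above sends the request as three iovecs: two big-endian words (the VLGETENTRYBYNAME opcode and the unpadded name length), the volume name bytes, and zero padding so the name occupies a whole number of 32-bit words on the wire. The user-space sketch below flattens the same layout into one buffer; it is illustrative only, and the helper name and buffer handling are this example's own.

#include <arpa/inet.h>		/* htonl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EX_VLGETENTRYBYNAME 504	/* opcode, as in vlclient.c above */

static size_t ex_marshal_getentrybyname(unsigned char *buf, const char *volname)
{
	uint32_t words[2];
	size_t len = strlen(volname);
	size_t pad = (4 - (len & 3)) & 3;	/* same padding rule as piov[2] */

	words[0] = htonl(EX_VLGETENTRYBYNAME);	/* operation ID */
	words[1] = htonl(len);			/* name length, unpadded */

	memcpy(buf, words, sizeof(words));
	memcpy(buf + sizeof(words), volname, len);
	memset(buf + sizeof(words) + len, 0, pad);
	return sizeof(words) + len + pad;
}

int main(void)
{
	unsigned char buf[128];
	size_t n = ex_marshal_getentrybyname(buf, "root.cell");

	/* "root.cell" is 9 bytes, so 3 pad bytes: 8 + 9 + 3 = 20 */
	printf("request is %zu bytes\n", n);
	return 0;
}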
8419 diff -urNp linux-5240/fs/afs/vlclient.h linux-5250/fs/afs/vlclient.h
8420 --- linux-5240/fs/afs/vlclient.h        1970-01-01 01:00:00.000000000 +0100
8421 +++ linux-5250/fs/afs/vlclient.h        
8422 @@ -0,0 +1,96 @@
8423 +/* vlclient.h: Volume Location Service client interface
8424 + *
8425 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
8426 + * Written by David Howells (dhowells@redhat.com)
8427 + *
8428 + * This program is free software; you can redistribute it and/or
8429 + * modify it under the terms of the GNU General Public License
8430 + * as published by the Free Software Foundation; either version
8431 + * 2 of the License, or (at your option) any later version.
8432 + */
8433 +
8434 +#ifndef _LINUX_AFS_VLCLIENT_H
8435 +#define _LINUX_AFS_VLCLIENT_H
8436 +
8437 +#include "types.h"
8438 +#include "cache-layout.h"
8439 +
8440 +enum AFSVL_Errors {
8441 +       AFSVL_IDEXIST           = 363520,       /* Volume Id entry exists in vl database */
8442 +       AFSVL_IO                = 363521,       /* I/O related error */
8443 +       AFSVL_NAMEEXIST         = 363522,       /* Volume name entry exists in vl database */
8444 +       AFSVL_CREATEFAIL        = 363523,       /* Internal creation failure */
8445 +       AFSVL_NOENT             = 363524,       /* No such entry */
8446 +       AFSVL_EMPTY             = 363525,       /* Vl database is empty */
8447 +       AFSVL_ENTDELETED        = 363526,       /* Entry is deleted (soft delete) */
8448 +       AFSVL_BADNAME           = 363527,       /* Volume name is illegal */
8449 +       AFSVL_BADINDEX          = 363528,       /* Index is out of range */
8450 +       AFSVL_BADVOLTYPE        = 363529,       /* Bad volume type */
8451 +       AFSVL_BADSERVER         = 363530,       /* Illegal server number (out of range) */
8452 +       AFSVL_BADPARTITION      = 363531,       /* Bad partition number */
8453 +       AFSVL_REPSFULL          = 363532,       /* Run out of space for Replication sites */
8454 +       AFSVL_NOREPSERVER       = 363533,       /* No such Replication server site exists */
8455 +       AFSVL_DUPREPSERVER      = 363534,       /* Replication site already exists */
8456 +       AFSVL_RWNOTFOUND        = 363535,       /* Parent R/W entry not found */
8457 +       AFSVL_BADREFCOUNT       = 363536,       /* Illegal Reference Count number */
8458 +       AFSVL_SIZEEXCEEDED      = 363537,       /* Vl size for attributes exceeded */
8459 +       AFSVL_BADENTRY          = 363538,       /* Bad incoming vl entry */
8460 +       AFSVL_BADVOLIDBUMP      = 363539,       /* Illegal max volid increment */
8461 +       AFSVL_IDALREADYHASHED   = 363540,       /* RO/BACK id already hashed */
8462 +       AFSVL_ENTRYLOCKED       = 363541,       /* Vl entry is already locked */
8463 +       AFSVL_BADVOLOPER        = 363542,       /* Bad volume operation code */
8464 +       AFSVL_BADRELLOCKTYPE    = 363543,       /* Bad release lock type */
8465 +       AFSVL_RERELEASE         = 363544,       /* Status report: last release was aborted */
8466 +       AFSVL_BADSERVERFLAG     = 363545,       /* Invalid replication site server flag */
8467 +       AFSVL_PERM              = 363546,       /* No permission access */
8468 +       AFSVL_NOMEM             = 363547,       /* malloc/realloc failed to alloc enough memory */
8469 +};
8470 +
8471 +/* maps to "struct vldbentry" in vvl-spec.pdf */
8472 +struct  afsvl_dbentry {
8473 +       char            name[65];               /* name of volume (including NUL char) */
8474 +       afs_voltype_t   type;                   /* volume type */
8475 +       unsigned        num_servers;            /* num servers that hold instances of this vol */
8476 +       unsigned        clone_id;               /* cloning ID */
8477 +
8478 +       unsigned        flags;
8479 +#define AFS_VLF_RWEXISTS       0x1000          /* R/W volume exists */
8480 +#define AFS_VLF_ROEXISTS       0x2000          /* R/O volume exists */
8481 +#define AFS_VLF_BACKEXISTS     0x4000          /* backup volume exists */
8482 +
8483 +       afs_volid_t     volume_ids[3];          /* volume IDs */
8484 +
8485 +       struct {
8486 +               struct in_addr  addr;           /* server address */
8487 +               unsigned        partition;      /* partition ID on this server */
8488 +               unsigned        flags;          /* server specific flags */
8489 +#define AFS_VLSF_NEWREPSITE    0x0001  /* unused */
8490 +#define AFS_VLSF_ROVOL         0x0002  /* this server holds a R/O instance of the volume */
8491 +#define AFS_VLSF_RWVOL         0x0004  /* this server holds a R/W instance of the volume */
8492 +#define AFS_VLSF_BACKVOL       0x0008  /* this server holds a backup instance of the volume */
8493 +       } servers[8];
8494 +
8495 +};
8496 +
8497 +/* probe a volume location server to see if it is still alive */
8498 +extern int afs_rxvl_probe(afs_server_t *server, int alloc_flags);
8499 +
8500 +/* look up a volume location database entry by name */
8501 +extern int afs_rxvl_get_entry_by_name(afs_server_t *server,
8502 +                                     const char *volname,
8503 +                                     struct afs_cache_volume *entry);
8504 +
8505 +/* look up a volume location database entry by ID */
8506 +extern int afs_rxvl_get_entry_by_id(afs_server_t *server,
8507 +                                   afs_volid_t volid,
8508 +                                   afs_voltype_t voltype,
8509 +                                   struct afs_cache_volume *entry);
8510 +
8511 +extern int afs_rxvl_get_entry_by_id_async(afs_async_op_t *op,
8512 +                                         afs_volid_t volid,
8513 +                                         afs_voltype_t voltype);
8514 +
8515 +extern int afs_rxvl_get_entry_by_id_async2(afs_async_op_t *op,
8516 +                                          struct afs_cache_volume *entry);
8517 +
8518 +#endif /* _LINUX_AFS_VLCLIENT_H */
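The reply-unmarshalling loops in vlclient.c fold the per-server AFS_VLSF_* bits and the volume-wide AFS_VLF_*EXISTS bits defined above into RW/RO/backup masks. The sketch below mirrors that translation in user space; the bit values are copied from the header, while the EX_STM_* values are stand-ins for the AFS_CACHE_VOL_STM_* constants, which live in cache-layout.h and are not shown in this excerpt.

#include <stdio.h>

#define AFS_VLF_RWEXISTS	0x1000	/* R/W volume exists */
#define AFS_VLF_ROEXISTS	0x2000	/* R/O volume exists */
#define AFS_VLF_BACKEXISTS	0x4000	/* backup volume exists */

#define AFS_VLSF_ROVOL		0x0002	/* server holds a R/O instance */
#define AFS_VLSF_RWVOL		0x0004	/* server holds a R/W instance */
#define AFS_VLSF_BACKVOL	0x0008	/* server holds a backup instance */

#define EX_STM_RW		0x01	/* stand-in mask bits */
#define EX_STM_RO		0x02
#define EX_STM_BAK		0x04

static unsigned char ex_decode_server_flags(unsigned flags)
{
	unsigned char mask = 0;

	if (flags & AFS_VLSF_RWVOL)   mask |= EX_STM_RW;
	if (flags & AFS_VLSF_ROVOL)   mask |= EX_STM_RO;
	if (flags & AFS_VLSF_BACKVOL) mask |= EX_STM_BAK;
	return mask;
}

static unsigned char ex_decode_volume_flags(unsigned flags)
{
	unsigned char mask = 0;

	if (flags & AFS_VLF_RWEXISTS)   mask |= EX_STM_RW;
	if (flags & AFS_VLF_ROEXISTS)   mask |= EX_STM_RO;
	if (flags & AFS_VLF_BACKEXISTS) mask |= EX_STM_BAK;
	return mask;
}

int main(void)
{
	/* a server holding both R/W and R/O instances of a volume */
	printf("srvtmask = %#x\n", ex_decode_server_flags(AFS_VLSF_RWVOL | AFS_VLSF_ROVOL));
	/* a volume with R/W and R/O variants but no backup */
	printf("vidmask  = %#x\n", ex_decode_volume_flags(AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS));
	return 0;
}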
8519 diff -urNp linux-5240/fs/afs/vlocation.c linux-5250/fs/afs/vlocation.c
8520 --- linux-5240/fs/afs/vlocation.c       1970-01-01 01:00:00.000000000 +0100
8521 +++ linux-5250/fs/afs/vlocation.c       
8522 @@ -0,0 +1,833 @@
8523 +/* vlocation.c: volume location management
8524 + *
8525 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
8526 + * Written by David Howells (dhowells@redhat.com)
8527 + *
8528 + * This program is free software; you can redistribute it and/or
8529 + * modify it under the terms of the GNU General Public License
8530 + * as published by the Free Software Foundation; either version
8531 + * 2 of the License, or (at your option) any later version.
8532 + */
8533 +
8534 +#include <linux/kernel.h>
8535 +#include <linux/module.h>
8536 +#include <linux/init.h>
8537 +#include <linux/slab.h>
8538 +#include <linux/fs.h>
8539 +#include <linux/pagemap.h>
8540 +#include "volume.h"
8541 +#include "cell.h"
8542 +#include "cmservice.h"
8543 +#include "fsclient.h"
8544 +#include "vlclient.h"
8545 +#include "cache.h"
8546 +#include "kafstimod.h"
8547 +#include <rxrpc/connection.h>
8548 +#include "internal.h"
8549 +
8550 +#define AFS_VLDB_TIMEOUT HZ*1000
8551 +
8552 +static void afs_vlocation_update_timer(afs_timer_t *timer);
8553 +static void afs_vlocation_update_attend(afs_async_op_t *op);
8554 +static void afs_vlocation_update_discard(afs_async_op_t *op);
8555 +
8556 +static void __afs_vlocation_timeout(afs_timer_t *timer)
8557 +{
8558 +       afs_vlocation_t *vlocation = list_entry(timer,afs_vlocation_t,timeout);
8559 +
8560 +       _debug("VL TIMEOUT [%s{u=%d}]",vlocation->vldb.name,atomic_read(&vlocation->usage));
8561 +
8562 +       afs_vlocation_do_timeout(vlocation);
8563 +}
8564 +
8565 +static const struct afs_timer_ops afs_vlocation_timer_ops = {
8566 +       timed_out:      __afs_vlocation_timeout,
8567 +};
8568 +
8569 +static const struct afs_timer_ops afs_vlocation_update_timer_ops = {
8570 +       timed_out:      afs_vlocation_update_timer,
8571 +};
8572 +
8573 +static const struct afs_async_op_ops afs_vlocation_update_op_ops = {
8574 +       attend:         afs_vlocation_update_attend,
8575 +       discard:        afs_vlocation_update_discard,
8576 +};
8577 +
8578 +static LIST_HEAD(afs_vlocation_update_pendq);  /* queue of VLs awaiting update */
8579 +static afs_vlocation_t *afs_vlocation_update;  /* VL currently being updated */
8580 +static spinlock_t afs_vlocation_update_lock = SPIN_LOCK_UNLOCKED; /* lock guarding update queue */
8581 +
8582 +/*****************************************************************************/
8583 +/*
8584 + * iterate through the VL servers in a cell until one of them admits knowing about the volume in
8585 + * question
8586 + * - caller must have cell->vl_sem write-locked
8587 + */
8588 +static int afs_vlocation_access_vl_by_name(afs_vlocation_t *vlocation,
8589 +                                          const char *name,
8590 +                                          struct afs_cache_volume *vldb)
8591 +{
8592 +       afs_server_t *server = NULL;
8593 +       afs_cell_t *cell = vlocation->cell;
8594 +       int count, ret;
8595 +
8596 +       _enter("%s,%s,",cell->name,name);
8597 +
8598 +       ret = -ENOMEDIUM;
8599 +       for (count=cell->vl_naddrs; count>0; count--) {
8600 +               _debug("CellServ[%hu]: %08x",
8601 +                      cell->vl_curr_svix,cell->vl_addrs[cell->vl_curr_svix].s_addr);
8602 +
8603 +               /* try and create a server */
8604 +               ret = afs_server_lookup(cell,&cell->vl_addrs[cell->vl_curr_svix],&server);
8605 +               switch (ret) {
8606 +               case 0:
8607 +                       break;
8608 +               case -ENOMEM:
8609 +               case -ENONET:
8610 +                       goto out;
8611 +               default:
8612 +                       goto rotate;
8613 +               }
8614 +
8615 +               /* attempt to access the VL server */
8616 +               ret = afs_rxvl_get_entry_by_name(server,name,vldb);
8617 +               switch (ret) {
8618 +               case 0:
8619 +                       afs_put_server(server);
8620 +                       vlocation->vldb.cell_ix = cell->cache_ix;
8621 +                       goto out;
8622 +               case -ENOMEM:
8623 +               case -ENONET:
8624 +               case -ENETUNREACH:
8625 +               case -EHOSTUNREACH:
8626 +               case -ECONNREFUSED:
8627 +                       down_write(&server->sem);
8628 +                       if (server->vlserver) {
8629 +                               rxrpc_put_connection(server->vlserver);
8630 +                               server->vlserver = NULL;
8631 +                       }
8632 +                       up_write(&server->sem);
8633 +                       afs_put_server(server);
8634 +                       if (ret==-ENOMEM || ret==-ENONET)
8635 +                               goto out;
8636 +                       goto rotate;
8637 +               case -ENOMEDIUM:
8638 +                       afs_put_server(server);
8639 +                       goto out;
8640 +               default:
8641 +                       afs_put_server(server);
8642 +                       ret = -ENOMEDIUM;
8643 +                       goto rotate;
8644 +               }
8645 +
8646 +               /* rotate the server records upon lookup failure */
8647 +       rotate:
8648 +               cell->vl_curr_svix++;
8649 +               cell->vl_curr_svix %= cell->vl_naddrs;
8650 +       }
8651 +
8652 + out:
8653 +       _leave(" = %d",ret);
8654 +       return ret;
8655 +
8656 +} /* end afs_vlocation_access_vl_by_name() */
8657 +
8658 +/*****************************************************************************/
8659 +/*
8660 + * iterate through the VL servers in a cell until one of them admits knowing about the volume in
8661 + * question
8662 + * - caller must have cell->vl_sem write-locked
8663 + */
8664 +static int afs_vlocation_access_vl_by_id(afs_vlocation_t *vlocation,
8665 +                                        afs_volid_t volid,
8666 +                                        afs_voltype_t voltype,
8667 +                                        struct afs_cache_volume *vldb)
8668 +{
8669 +       afs_server_t *server = NULL;
8670 +       afs_cell_t *cell = vlocation->cell;
8671 +       int count, ret;
8672 +
8673 +       _enter("%s,%x,%d,",cell->name,volid,voltype);
8674 +
8675 +       ret = -ENOMEDIUM;
8676 +       for (count=cell->vl_naddrs; count>0; count--) {
8677 +               _debug("CellServ[%hu]: %08x",
8678 +                      cell->vl_curr_svix,cell->vl_addrs[cell->vl_curr_svix].s_addr);
8679 +
8680 +               /* try and create a server */
8681 +               ret = afs_server_lookup(cell,&cell->vl_addrs[cell->vl_curr_svix],&server);
8682 +               switch (ret) {
8683 +               case 0:
8684 +                       break;
8685 +               case -ENOMEM:
8686 +               case -ENONET:
8687 +                       goto out;
8688 +               default:
8689 +                       goto rotate;
8690 +               }
8691 +
8692 +               /* attempt to access the VL server */
8693 +               ret = afs_rxvl_get_entry_by_id(server,volid,voltype,vldb);
8694 +               switch (ret) {
8695 +               case 0:
8696 +                       afs_put_server(server);
8697 +                       vlocation->vldb.cell_ix = cell->cache_ix;
8698 +                       goto out;
8699 +               case -ENOMEM:
8700 +               case -ENONET:
8701 +               case -ENETUNREACH:
8702 +               case -EHOSTUNREACH:
8703 +               case -ECONNREFUSED:
8704 +                       down_write(&server->sem);
8705 +                       if (server->vlserver) {
8706 +                               rxrpc_put_connection(server->vlserver);
8707 +                               server->vlserver = NULL;
8708 +                       }
8709 +                       up_write(&server->sem);
8710 +                       afs_put_server(server);
8711 +                       if (ret==-ENOMEM || ret==-ENONET)
8712 +                               goto out;
8713 +                       goto rotate;
8714 +               case -ENOMEDIUM:
8715 +                       afs_put_server(server);
8716 +                       goto out;
8717 +               default:
8718 +                       afs_put_server(server);
8719 +                       ret = -ENOMEDIUM;
8720 +                       goto rotate;
8721 +               }
8722 +
8723 +               /* rotate the server records upon lookup failure */
8724 +       rotate:
8725 +               cell->vl_curr_svix++;
8726 +               cell->vl_curr_svix %= cell->vl_naddrs;
8727 +       }
8728 +
8729 + out:
8730 +       _leave(" = %d",ret);
8731 +       return ret;
8732 +
8733 +} /* end afs_vlocation_access_vl_by_id() */
8734 +
8735 +/*****************************************************************************/
8736 +/*
8737 + * lookup volume location
8738 + * - caller must have cell->vol_sem write-locked
8739 + * - iterate through the VL servers in a cell until one of them admits knowing about the volume in
8740 + *   question
8741 + * - lookup in the local cache if not able to find on the VL server
8742 + * - insert/update in the local cache if did get a VL response
8743 + */
8744 +int afs_vlocation_lookup(afs_cache_t *cache, afs_cell_t *cell, const char *name,
8745 +                        afs_vlocation_t **_vlocation)
8746 +{
8747 +       struct afs_cache_volume vldb;
8748 +       struct list_head *_p;
8749 +       afs_vlocation_t *vlocation;
8750 +       afs_voltype_t voltype;
8751 +       afs_volid_t vid;
8752 +       int active = 0, ret;
8753 +
8754 +       _enter(",%s,%s,",cell->name,name);
8755 +
8756 +       if (strlen(name)>sizeof(vlocation->vldb.name)) {
8757 +               _leave(" = -ENAMETOOLONG");
8758 +               return -ENAMETOOLONG;
8759 +       }
8760 +
8761 +       /* search the cell's active list first */
8762 +       list_for_each(_p,&cell->vl_list) {
8763 +               vlocation = list_entry(_p,afs_vlocation_t,link);
8764 +               if (strncmp(vlocation->vldb.name,name,sizeof(vlocation->vldb.name))==0)
8765 +                       goto found_in_memory;
8766 +       }
8767 +
8768 +       /* search the cell's graveyard list second */
8769 +       spin_lock(&cell->vl_gylock);
8770 +       list_for_each(_p,&cell->vl_graveyard) {
8771 +               vlocation = list_entry(_p,afs_vlocation_t,link);
8772 +               if (strncmp(vlocation->vldb.name,name,sizeof(vlocation->vldb.name))==0)
8773 +                       goto found_in_graveyard;
8774 +       }
8775 +       spin_unlock(&cell->vl_gylock);
8776 +
8777 +       /* not in the cell's in-memory lists - create a new record */
8778 +       vlocation = kmalloc(sizeof(afs_vlocation_t),GFP_KERNEL);
8779 +       if (!vlocation)
8780 +               return -ENOMEM;
8781 +
8782 +       memset(vlocation,0,sizeof(afs_vlocation_t));
8783 +       atomic_set(&vlocation->usage,1);
8784 +       INIT_LIST_HEAD(&vlocation->link);
8785 +       rwlock_init(&vlocation->lock);
8786 +       strncpy(vlocation->vldb.name,name,sizeof(vlocation->vldb.name));
8787 +
8788 +       afs_timer_init(&vlocation->timeout,&afs_vlocation_timer_ops);
8789 +       afs_timer_init(&vlocation->upd_timer,&afs_vlocation_update_timer_ops);
8790 +       afs_async_op_init(&vlocation->upd_op,&afs_vlocation_update_op_ops);
8791 +
8792 +#if 0
8793 +       afs_get_cache(cache);
8794 +       vlocation->cache = cache;
8795 +#endif
8796 +       afs_get_cell(cell);
8797 +       vlocation->cell = cell;
8798 +       vlocation->vldb.cell_ix = cell->cache_ix;
8799 +
8800 +       list_add_tail(&vlocation->link,&cell->vl_list);
8801 +
8802 +#if 0
8803 +       /* search local cache if wasn't in memory */
8804 +       ret = afs_cache_lookup_vlocation(vlocation);
8805 +       switch (ret) {
8806 +       default:        goto error;             /* disk error */
8807 +       case 0:         goto found_in_cache;    /* pulled from local cache into memory */
8808 +       case -ENOENT:   break;                  /* not in local cache */
8809 +       }
8810 +#endif
8811 +
8812 +       /* try to look up an unknown volume in the cell VL databases by name */
8813 +       ret = afs_vlocation_access_vl_by_name(vlocation,name,&vldb);
8814 +       if (ret<0) {
8815 +               printk("kAFS: failed to locate '%s' in cell '%s'\n",name,cell->name);
8816 +               goto error;
8817 +       }
8818 +
8819 +       goto found_on_vlserver;
8820 +
8821 + found_in_graveyard:
8822 +       /* found in the graveyard - resurrect */
8823 +       _debug("found in graveyard");
8824 +       atomic_inc(&vlocation->usage);
8825 +       list_del(&vlocation->link);
8826 +       list_add_tail(&vlocation->link,&cell->vl_list);
8827 +       spin_unlock(&cell->vl_gylock);
8828 +
8829 +       afs_kafstimod_del_timer(&vlocation->timeout);
8830 +       goto active;
8831 +
8832 + found_in_memory:
8833 +       /* found in memory - check to see if it's active */
8834 +       _debug("found in memory");
8835 +       atomic_inc(&vlocation->usage);
8836 +
8837 + active:
8838 +       active = 1;
8839 +
8840 +/* found_in_cache: */
8841 +       /* try to look up a cached volume in the cell VL databases by ID */
8842 +       _debug("found in cache");
8843 +
8844 +       _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
8845 +              vlocation->vldb.name,
8846 +              vlocation->vldb.vidmask,
8847 +              ntohl(vlocation->vldb.servers[0].s_addr),vlocation->vldb.srvtmask[0],
8848 +              ntohl(vlocation->vldb.servers[1].s_addr),vlocation->vldb.srvtmask[1],
8849 +              ntohl(vlocation->vldb.servers[2].s_addr),vlocation->vldb.srvtmask[2]
8850 +              );
8851 +
8852 +       _debug("Vids: %08x %08x %08x",
8853 +              vlocation->vldb.vid[0],vlocation->vldb.vid[1],vlocation->vldb.vid[2]);
8854 +
8855 +       if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_RW) {
8856 +               vid = vlocation->vldb.vid[0];
8857 +               voltype = AFSVL_RWVOL;
8858 +       }
8859 +       else if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_RO) {
8860 +               vid = vlocation->vldb.vid[1];
8861 +               voltype = AFSVL_ROVOL;
8862 +       }
8863 +       else if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_BAK) {
8864 +               vid = vlocation->vldb.vid[2];
8865 +               voltype = AFSVL_BACKVOL;
8866 +       }
8867 +       else {
8868 +               BUG();
8869 +               vid = 0;
8870 +               voltype = 0;
8871 +       }
8872 +
8873 +       ret = afs_vlocation_access_vl_by_id(vlocation,vid,voltype,&vldb);
8874 +       switch (ret) {
8875 +               /* net error */
8876 +       default:
8877 +               printk("kAFS: failed to look up volume '%s' (%x) in '%s': %d\n",
8878 +                      name,vid,cell->name,ret);
8879 +               goto error;
8880 +
8881 +               /* found on the VL server */
8882 +       case 0: 
8883 +               goto found_on_vlserver;
8884 +
8885 +               /* uh oh... looks like the volume got deleted */
8886 +       case -ENOMEDIUM:
8887 +               printk("kAFS: volume '%s' (%x) does not exist in cell '%s'\n",name,vid,cell->name);
8888 +
8889 +               /* TODO: make existing record unavailable */
8890 +               goto error;
8891 +       }
8892 +
8893 + found_on_vlserver:
8894 +       _debug("Done VL Lookup: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
8895 +              name,
8896 +              vldb.vidmask,
8897 +              ntohl(vldb.servers[0].s_addr),vldb.srvtmask[0],
8898 +              ntohl(vldb.servers[1].s_addr),vldb.srvtmask[1],
8899 +              ntohl(vldb.servers[2].s_addr),vldb.srvtmask[2]
8900 +              );
8901 +
8902 +       _debug("Vids: %08x %08x %08x",vldb.vid[0],vldb.vid[1],vldb.vid[2]);
8903 +
8904 +       if (strncmp(vldb.name,name,sizeof(vlocation->vldb.name))!=0)
8905 +               printk("kAFS: name of volume '%s' changed to '%s' on server\n",name,vldb.name);
8906 +
8907 +       memcpy(&vlocation->vldb,&vldb,sizeof(vlocation->vldb));
8908 +       vlocation->vldb.cell_ix = cell->cache_ix;
8909 +
8910 +#if 0
8911 +       /* add volume entry to local cache */
8912 +       ret = afs_cache_update_vlocation(vlocation);
8913 +       if (ret<0)
8914 +               goto error;
8915 +#endif
8916 +
8917 +       afs_kafstimod_add_timer(&vlocation->upd_timer,10*HZ);
8918 +
8919 +       *_vlocation = vlocation;
8920 +       _leave(" = 0 (%p)",vlocation);
8921 +       return 0;
8922 +
8923 + error:
8924 +       if (vlocation) {
8925 +               if (active) {
8926 +                       __afs_put_vlocation(vlocation);
8927 +               }
8928 +               else {
8929 +                       list_del(&vlocation->link);
8930 +                       afs_put_cell(vlocation->cell);
8931 +#if 0
8932 +                       afs_put_cache(vlocation->cache);
8933 +#endif
8934 +                       kfree(vlocation);
8935 +               }
8936 +       }
8937 +
8938 +       _leave(" = %d",ret);
8939 +       return ret;
8940 +} /* end afs_vlocation_lookup() */
8941 +
8942 +/*****************************************************************************/
8943 +/*
8944 + * finish using a volume location record
8945 + * - caller must have cell->vol_sem write-locked
8946 + */
8947 +void __afs_put_vlocation(afs_vlocation_t *vlocation)
8948 +{
8949 +       afs_cell_t *cell = vlocation->cell;
8950 +
8951 +       _enter("%s",vlocation->vldb.name);
8952 +
8953 +       /* sanity check */
8954 +       if (atomic_read(&vlocation->usage)<=0)
8955 +               BUG();
8956 +
8957 +       spin_lock(&cell->vl_gylock);
8958 +       if (likely(!atomic_dec_and_test(&vlocation->usage))) {
8959 +               spin_unlock(&cell->vl_gylock);
8960 +               _leave("");
8961 +               return;
8962 +       }
8963 +
8964 +       /* move to graveyard queue */
8965 +       list_del(&vlocation->link);
8966 +       list_add_tail(&vlocation->link,&cell->vl_graveyard);
8967 +
8968 +       /* remove from pending timeout queue (refcounted if actually being updated) */
8969 +       list_del_init(&vlocation->upd_op.link);
8970 +
8971 +       /* time out in 10 secs */
8972 +       afs_kafstimod_del_timer(&vlocation->upd_timer);
8973 +       afs_kafstimod_add_timer(&vlocation->timeout,10*HZ);
8974 +
8975 +       spin_unlock(&cell->vl_gylock);
8976 +
8977 +       _leave(" [killed]");
8978 +} /* end __afs_put_vlocation() */
8979 +
8980 +/*****************************************************************************/
8981 +/*
8982 + * finish using a volume location record
8983 + */
8984 +void afs_put_vlocation(afs_vlocation_t *vlocation)
8985 +{
8986 +       afs_cell_t *cell = vlocation->cell;
8987 +
8988 +       down_write(&cell->vl_sem);
8989 +       __afs_put_vlocation(vlocation);
8990 +       up_write(&cell->vl_sem);
8991 +} /* end afs_put_vlocation() */
8992 +
8993 +/*****************************************************************************/
8994 +/*
8995 + * timeout vlocation record
8996 + * - removes from the cell's graveyard if the usage count is zero
8997 + */
8998 +void afs_vlocation_do_timeout(afs_vlocation_t *vlocation)
8999 +{
9000 +       afs_cell_t *cell;
9001 +
9002 +       _enter("%s",vlocation->vldb.name);
9003 +
9004 +       cell = vlocation->cell;
9005 +
9006 +       if (atomic_read(&vlocation->usage)<0) BUG();
9007 +
9008 +       /* remove from graveyard if still dead */
9009 +       spin_lock(&cell->vl_gylock);
9010 +       if (atomic_read(&vlocation->usage)==0)
9011 +               list_del_init(&vlocation->link);
9012 +       else
9013 +               vlocation = NULL;
9014 +       spin_unlock(&cell->vl_gylock);
9015 +
9016 +       if (!vlocation) {
9017 +               _leave("");
9018 +               return; /* resurrected */
9019 +       }
9020 +
9021 +       /* we can now destroy it properly */
9022 +       afs_put_cell(cell);
9023 +#if 0
9024 +       afs_put_cache(vlocation->cache);
9025 +#endif
9026 +
9027 +       kfree(vlocation);
9028 +
9029 +       _leave(" [destroyed]");
9030 +} /* end afs_vlocation_do_timeout() */
9031 +
9032 +/*****************************************************************************/
9033 +/*
9034 + * send an update operation to the currently selected server
9035 + */
9036 +static int afs_vlocation_update_begin(afs_vlocation_t *vlocation)
9037 +{
9038 +       afs_voltype_t voltype;
9039 +       afs_volid_t vid;
9040 +       int ret;
9041 +
9042 +       _enter("%s{ufs=%u ucs=%u}",
9043 +              vlocation->vldb.name,vlocation->upd_first_svix,vlocation->upd_curr_svix);
9044 +
9045 +       /* try to look up a cached volume in the cell VL databases by ID */
9046 +       if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_RW) {
9047 +               vid = vlocation->vldb.vid[0];
9048 +               voltype = AFSVL_RWVOL;
9049 +       }
9050 +       else if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_RO) {
9051 +               vid = vlocation->vldb.vid[1];
9052 +               voltype = AFSVL_ROVOL;
9053 +       }
9054 +       else if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_BAK) {
9055 +               vid = vlocation->vldb.vid[2];
9056 +               voltype = AFSVL_BACKVOL;
9057 +       }
9058 +       else {
9059 +               BUG();
9060 +               vid = 0;
9061 +               voltype = 0;
9062 +       }
9063 +
9064 +       /* contact the chosen server */
9065 +       ret = afs_server_lookup(vlocation->cell,
9066 +                               &vlocation->cell->vl_addrs[vlocation->upd_curr_svix],
9067 +                               &vlocation->upd_op.server);
9068 +       switch (ret) {
9069 +       case 0:
9070 +               break;
9071 +       case -ENOMEM:
9072 +       case -ENONET:
9073 +       default:
9074 +               _leave(" = %d",ret);
9075 +               return ret;
9076 +       }
9077 +
9078 +       /* initiate the update operation */
9079 +       ret = afs_rxvl_get_entry_by_id_async(&vlocation->upd_op,vid,voltype);
9080 +       if (ret<0) {
9081 +               _leave(" = %d",ret);
9082 +               return ret;
9083 +       }
9084 +
9085 +       _leave(" = %d",ret);
9086 +       return ret;
9087 +} /* end afs_vlocation_update_begin() */
9088 +
9089 +/*****************************************************************************/
9090 +/*
9091 + * abandon updating a VL record
9092 + * - does not restart the update timer
9093 + */
9094 +static void afs_vlocation_update_abandon(afs_vlocation_t *vlocation,
9095 +                                        afs_vlocation_upd_t state,
9096 +                                        int ret)
9097 +{
9098 +       _enter("%s,%u",vlocation->vldb.name,state);
9099 +
9100 +       if (ret<0)
9101 +               printk("kAFS: Abandoning VL update '%s': %d\n",vlocation->vldb.name,ret);
9102 +
9103 +       /* discard the server record */
9104 +       if (vlocation->upd_op.server) {
9105 +               afs_put_server(vlocation->upd_op.server);
9106 +               vlocation->upd_op.server = NULL;
9107 +       }
9108 +
9109 +       spin_lock(&afs_vlocation_update_lock);
9110 +       afs_vlocation_update = NULL;
9111 +       vlocation->upd_state = state;
9112 +
9113 +       /* TODO: start updating next VL record on pending list */
9114 +
9115 +       spin_unlock(&afs_vlocation_update_lock);
9116 +
9117 +       _leave("");
9118 +} /* end afs_vlocation_update_abandon() */
9119 +
9120 +/*****************************************************************************/
9121 +/*
9122 + * handle periodic update timeouts and busy retry timeouts
9123 + * - called from kafstimod
9124 + */
9125 +static void afs_vlocation_update_timer(afs_timer_t *timer)
9126 +{
9127 +       afs_vlocation_t *vlocation = list_entry(timer,afs_vlocation_t,upd_timer);
9128 +       int ret;
9129 +
9130 +       _enter("%s",vlocation->vldb.name);
9131 +
9132 +       /* only update if not in the graveyard (defend against putting too) */
9133 +       spin_lock(&vlocation->cell->vl_gylock);
9134 +
9135 +       if (!atomic_read(&vlocation->usage))
9136 +               goto out_unlock1;
9137 +
9138 +       spin_lock(&afs_vlocation_update_lock);
9139 +
9140 +       /* if we were woken up due to an EBUSY sleep, then restart immediately if possible, or
9141 +        * else jump to the front of the pending queue */
9142 +       if (vlocation->upd_state==AFS_VLUPD_BUSYSLEEP) {
9143 +               if (afs_vlocation_update) {
9144 +                       list_add(&vlocation->upd_op.link,&afs_vlocation_update_pendq);
9145 +               }
9146 +               else {
9147 +                       afs_get_vlocation(vlocation);
9148 +                       afs_vlocation_update = vlocation;
9149 +                       vlocation->upd_state = AFS_VLUPD_INPROGRESS;
9150 +               }
9151 +               goto out_unlock2;
9152 +       }
9153 +
9154 +       /* put on pending queue if there's already another update in progress */
9155 +       if (afs_vlocation_update) {
9156 +               vlocation->upd_state = AFS_VLUPD_PENDING;
9157 +               list_add_tail(&vlocation->upd_op.link,&afs_vlocation_update_pendq);
9158 +               goto out_unlock2;
9159 +       }
9160 +
9161 +       /* hold a ref on it while actually updating */
9162 +       afs_get_vlocation(vlocation);
9163 +       afs_vlocation_update = vlocation;
9164 +       vlocation->upd_state = AFS_VLUPD_INPROGRESS;
9165 +
9166 +       spin_unlock(&afs_vlocation_update_lock);
9167 +       spin_unlock(&vlocation->cell->vl_gylock);
9168 +
9169 +       /* okay... we can start the update */
9170 +       _debug("BEGIN VL UPDATE [%s]",vlocation->vldb.name);
9171 +       vlocation->upd_first_svix = vlocation->cell->vl_curr_svix;
9172 +       vlocation->upd_curr_svix = vlocation->upd_first_svix;
9173 +       vlocation->upd_rej_cnt = 0;
9174 +       vlocation->upd_busy_cnt = 0;
9175 +
9176 +       ret = afs_vlocation_update_begin(vlocation);
9177 +       if (ret<0) {
9178 +               afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,ret);
9179 +               afs_kafstimod_add_timer(&vlocation->upd_timer,AFS_VLDB_TIMEOUT);
9180 +               afs_put_vlocation(vlocation);
9181 +       }
9182 +
9183 +       _leave("");
9184 +       return;
9185 +
9186 + out_unlock2:
9187 +       spin_unlock(&afs_vlocation_update_lock);
9188 + out_unlock1:
9189 +       spin_unlock(&vlocation->cell->vl_gylock);
9190 +       _leave("");
9191 +       return;
9192 +
9193 +} /* end afs_vlocation_update_timer() */
9194 +
9195 +/*****************************************************************************/
9196 +/*
9197 + * attend to an update operation upon which an event happened
9198 + * - called in kafsasyncd context
9199 + */
9200 +static void afs_vlocation_update_attend(afs_async_op_t *op)
9201 +{
9202 +       struct afs_cache_volume vldb;
9203 +       afs_vlocation_t *vlocation = list_entry(op,afs_vlocation_t,upd_op);
9204 +       unsigned tmp;
9205 +       int ret;
9206 +
9207 +       _enter("%s",vlocation->vldb.name);
9208 +
9209 +       ret = afs_rxvl_get_entry_by_id_async2(op,&vldb);
9210 +       switch (ret) {
9211 +       case -EAGAIN:
9212 +               _leave(" [unfinished]");
9213 +               return;
9214 +
9215 +       case 0:
9216 +               _debug("END VL UPDATE: %d\n",ret);
9217 +               vlocation->valid = 1;
9218 +
9219 +               _debug("Done VL Lookup: %02x { %08x(%x) %08x(%x) %08x(%x) }",
9220 +                      vldb.vidmask,
9221 +                      ntohl(vldb.servers[0].s_addr),vldb.srvtmask[0],
9222 +                      ntohl(vldb.servers[1].s_addr),vldb.srvtmask[1],
9223 +                      ntohl(vldb.servers[2].s_addr),vldb.srvtmask[2]
9224 +                      );
9225 +
9226 +               _debug("Vids: %08x %08x %08x",vldb.vid[0],vldb.vid[1],vldb.vid[2]);
9227 +
9228 +               afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,0);
9229 +
9230 +               down_write(&vlocation->cell->vl_sem);
9231 +
9232 +               /* actually update the cache */
9233 +               if (strncmp(vldb.name,vlocation->vldb.name,sizeof(vlocation->vldb.name))!=0)
9234 +                       printk("kAFS: name of volume '%s' changed to '%s' on server\n",
9235 +                              vlocation->vldb.name,vldb.name);
9236 +
9237 +               memcpy(&vlocation->vldb,&vldb,sizeof(vlocation->vldb));
9238 +               vlocation->vldb.cell_ix = vlocation->cell->cache_ix;
9239 +
9240 +#if 0
9241 +               /* add volume entry to local cache */
9242 +               ret = afs_cache_update_vlocation(vlocation);
9243 +#endif
9244 +
9245 +               up_write(&vlocation->cell->vl_sem);
9246 +
9247 +               if (ret<0)
9248 +                       printk("kAFS: failed to update local cache: %d\n",ret);
9249 +
9250 +               afs_kafstimod_add_timer(&vlocation->upd_timer,AFS_VLDB_TIMEOUT);
9251 +               afs_put_vlocation(vlocation);
9252 +               _leave(" [found]");
9253 +               return;
9254 +
9255 +       case -ENOMEDIUM:
9256 +               vlocation->upd_rej_cnt++;
9257 +               goto try_next;
9258 +
9259 +               /* the server is locked - retry in a very short while */
9260 +       case -EBUSY:
9261 +               vlocation->upd_busy_cnt++;
9262 +               if (vlocation->upd_busy_cnt>3)
9263 +                       goto try_next; /* too many retries */
9264 +
9265 +               afs_vlocation_update_abandon(vlocation,AFS_VLUPD_BUSYSLEEP,0);
9266 +               afs_kafstimod_add_timer(&vlocation->upd_timer,HZ/2);
9267 +               afs_put_vlocation(vlocation);
9268 +               _leave(" [busy]");
9269 +               return;
9270 +
9271 +       case -ENETUNREACH:
9272 +       case -EHOSTUNREACH:
9273 +       case -ECONNREFUSED:
9274 +       case -EREMOTEIO:
9275 +               /* record bad vlserver info in the cell too
9276 +                * - TODO: use down_write_trylock() if available
9277 +                */
9278 +               if (vlocation->upd_curr_svix == vlocation->cell->vl_curr_svix)
9279 +                       vlocation->cell->vl_curr_svix =
9280 +                               vlocation->cell->vl_curr_svix % vlocation->cell->vl_naddrs;
9281 +
9282 +       case -EBADRQC:
9283 +       case -EINVAL:
9284 +       case -EACCES:
9285 +       case -EBADMSG:
9286 +               goto try_next;
9287 +
9288 +       default:
9289 +               goto abandon;
9290 +       }
9291 +
9292 +       /* try contacting the next server */
9293 + try_next:
9294 +       vlocation->upd_busy_cnt = 0;
9295 +
9296 +       if (vlocation->upd_op.server) {
9297 +               /* discard the server record */
9298 +               afs_put_server(vlocation->upd_op.server);
9299 +               vlocation->upd_op.server = NULL;
9300 +       }
9301 +
9302 +       tmp = vlocation->cell->vl_naddrs;
9303 +       if (tmp==0)
9304 +               goto abandon;
9305 +
9306 +       vlocation->upd_curr_svix++;
9307 +       if (vlocation->upd_curr_svix >= tmp) vlocation->upd_curr_svix = 0;
9308 +       if (vlocation->upd_first_svix >= tmp) vlocation->upd_first_svix = tmp - 1;
9309 +
9310 +       /* move to the next server */
9311 +       if (vlocation->upd_curr_svix!=vlocation->upd_first_svix) {
9312 +               afs_vlocation_update_begin(vlocation);
9313 +               _leave(" [next]");
9314 +               return;
9315 +       }
9316 +
9317 +       /* ran out of servers to try - was the volume rejected? */
9318 +       if (vlocation->upd_rej_cnt>0) {
9319 +               printk("kAFS: Active volume no longer valid '%s'\n",vlocation->vldb.name);
9320 +               vlocation->valid = 0;
9321 +               afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,0);
9322 +               afs_kafstimod_add_timer(&vlocation->upd_timer,AFS_VLDB_TIMEOUT);
9323 +               afs_put_vlocation(vlocation);
9324 +               _leave(" [invalidated]");
9325 +               return;
9326 +       }
9327 +
9328 +       /* abandon the update */
9329 + abandon:
9330 +       afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,ret);
9331 +       afs_kafstimod_add_timer(&vlocation->upd_timer,HZ*10);
9332 +       afs_put_vlocation(vlocation);
9333 +       _leave(" [abandoned]");
9334 +
9335 +} /* end afs_vlocation_update_attend() */
9336 +
9337 +/*****************************************************************************/
9338 +/*
9339 + * deal with an update operation being discarded
9340 + * - called in kafsasyncd context when it's dying due to rmmod
9341 + * - the call has already been aborted and put()'d
9342 + */
9343 +static void afs_vlocation_update_discard(afs_async_op_t *op)
9344 +{
9345 +       afs_vlocation_t *vlocation = list_entry(op,afs_vlocation_t,upd_op);
9346 +
9347 +       _enter("%s",vlocation->vldb.name);
9348 +
9349 +       afs_put_server(op->server);
9350 +       op->server = NULL;
9351 +
9352 +       afs_put_vlocation(vlocation);
9353 +
9354 +       _leave("");
9355 +} /* end afs_vlocation_update_discard() */
9356 diff -urNp linux-5240/fs/afs/vnode.c linux-5250/fs/afs/vnode.c
9357 --- linux-5240/fs/afs/vnode.c   1970-01-01 01:00:00.000000000 +0100
9358 +++ linux-5250/fs/afs/vnode.c   
9359 @@ -0,0 +1,317 @@
9360 +/* vnode.c: AFS vnode management
9361 + *
9362 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
9363 + * Written by David Howells (dhowells@redhat.com)
9364 + *
9365 + * This program is free software; you can redistribute it and/or
9366 + * modify it under the terms of the GNU General Public License
9367 + * as published by the Free Software Foundation; either version
9368 + * 2 of the License, or (at your option) any later version.
9369 + */
9370 +
9371 +#include <linux/kernel.h>
9372 +#include <linux/module.h>
9373 +#include <linux/init.h>
9374 +#include <linux/slab.h>
9375 +#include <linux/fs.h>
9376 +#include <linux/pagemap.h>
9377 +#include "volume.h"
9378 +#include "cell.h"
9379 +#include "cmservice.h"
9380 +#include "fsclient.h"
9381 +#include "vlclient.h"
9382 +#include "cache.h"
9383 +#include "vnode.h"
9384 +#include "internal.h"
9385 +
9386 +static void afs_vnode_cb_timed_out(struct afs_timer *timer);
9387 +
9388 +struct afs_timer_ops afs_vnode_cb_timed_out_ops = {
9389 +       timed_out:      afs_vnode_cb_timed_out,
9390 +};
9391 +
9392 +/*****************************************************************************/
9393 +/*
9394 + * handle a callback timing out
9395 + * TODO: retain a ref to vnode struct for an outstanding callback timeout
9396 + */
9397 +static void afs_vnode_cb_timed_out(struct afs_timer *timer)
9398 +{
9399 +       afs_server_t *oldserver;
9400 +       afs_vnode_t *vnode;
9401 +
9402 +       vnode = list_entry(timer,afs_vnode_t,cb_timeout);
9403 +
9404 +       _enter("%p",vnode);
9405 +
9406 +       /* set the changed flag in the vnode and release the server */
9407 +       spin_lock(&vnode->lock);
9408 +
9409 +       oldserver = xchg(&vnode->cb_server,NULL);
9410 +       if (oldserver) {
9411 +               vnode->flags |= AFS_VNODE_CHANGED;
9412 +
9413 +               spin_lock(&afs_cb_hash_lock);
9414 +               list_del_init(&vnode->cb_hash_link);
9415 +               spin_unlock(&afs_cb_hash_lock);
9416 +
9417 +               spin_lock(&oldserver->cb_lock);
9418 +               list_del_init(&vnode->cb_link);
9419 +               spin_unlock(&oldserver->cb_lock);
9420 +       }
9421 +
9422 +       spin_unlock(&vnode->lock);
9423 +
9424 +       if (oldserver)
9425 +               afs_put_server(oldserver);
9426 +
9427 +       _leave("");
9428 +} /* end afs_vnode_cb_timed_out() */
9429 +
9430 +/*****************************************************************************/
9431 +/*
9432 + * finish off updating the recorded status of a file
9433 + * - starts callback expiry timer
9434 + * - adds to server's callback list
9435 + */
9436 +void afs_vnode_finalise_status_update(afs_vnode_t *vnode, afs_server_t *server, int ret)
9437 +{
9438 +       afs_server_t *oldserver = NULL;
9439 +
9440 +       _enter("%p,%p,%d",vnode,server,ret);
9441 +
9442 +       spin_lock(&vnode->lock);
9443 +
9444 +       vnode->flags &= ~AFS_VNODE_CHANGED;
9445 +
9446 +       if (ret==0) {
9447 +               /* adjust the callback timeout appropriately */
9448 +               afs_kafstimod_add_timer(&vnode->cb_timeout,vnode->cb_expiry*HZ);
9449 +
9450 +               spin_lock(&afs_cb_hash_lock);
9451 +               list_del(&vnode->cb_hash_link);
9452 +               list_add_tail(&vnode->cb_hash_link,&afs_cb_hash(server,&vnode->fid));
9453 +               spin_unlock(&afs_cb_hash_lock);
9454 +
9455 +               /* swap ref to old callback server with that for new callback server */
9456 +               oldserver = xchg(&vnode->cb_server,server);
9457 +               if (oldserver!=server) {
9458 +                       if (oldserver) {
9459 +                               spin_lock(&oldserver->cb_lock);
9460 +                               list_del_init(&vnode->cb_link);
9461 +                               spin_unlock(&oldserver->cb_lock);
9462 +                       }
9463 +
9464 +                       afs_get_server(server);
9465 +                       spin_lock(&server->cb_lock);
9466 +                       list_add_tail(&vnode->cb_link,&server->cb_promises);
9467 +                       spin_unlock(&server->cb_lock);
9468 +               }
9469 +               else {
9470 +                       /* same server */
9471 +                       oldserver = NULL;
9472 +               }
9473 +       }
9474 +       else if (ret==-ENOENT) {
9475 +               /* the file was deleted - clear the callback timeout */
9476 +               oldserver = xchg(&vnode->cb_server,NULL);
9477 +               afs_kafstimod_del_timer(&vnode->cb_timeout);
9478 +
9479 +               _debug("got NOENT from server - marking file deleted");
9480 +               vnode->flags |= AFS_VNODE_DELETED;
9481 +       }
9482 +
9483 +       vnode->update_cnt--;
9484 +
9485 +       spin_unlock(&vnode->lock);
9486 +
9487 +       wake_up_all(&vnode->update_waitq);
9488 +
9489 +       if (oldserver)
9490 +               afs_put_server(oldserver);
9491 +
9492 +       _leave("");
9493 +
9494 +} /* end afs_vnode_finalise_status_update() */
9495 +
9496 +/*****************************************************************************/
9497 +/*
9498 + * fetch file status from the volume
9499 + * - don't issue a fetch if:
9500 + *   - the changed bit is not set and there's a valid callback
9501 + *   - there are any outstanding ops that will fetch the status
9502 + * - TODO implement local caching
9503 + */
9504 +int afs_vnode_fetch_status(afs_vnode_t *vnode)
9505 +{
9506 +       afs_server_t *server;
9507 +       int ret;
9508 +
9509 +       DECLARE_WAITQUEUE(myself,current);
9510 +
9511 +       _enter("%s,{%u,%u,%u}",vnode->volume->vlocation->vldb.name,
9512 +              vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique);
9513 +
9514 +       if (!(vnode->flags & AFS_VNODE_CHANGED) && vnode->cb_server) {
9515 +               _leave(" [unchanged]");
9516 +               return 0;
9517 +       }
9518 +
9519 +       if (vnode->flags & AFS_VNODE_DELETED) {
9520 +               _leave(" [deleted]");
9521 +               return -ENOENT;
9522 +       }
9523 +
9524 +       spin_lock(&vnode->lock);
9525 +
9526 +       if (!(vnode->flags & AFS_VNODE_CHANGED)) {
9527 +               spin_unlock(&vnode->lock);
9528 +               _leave(" [unchanged]");
9529 +               return 0;
9530 +       }
9531 +
9532 +       if (vnode->update_cnt>0) {
9533 +               /* someone else started a fetch */
9534 +               set_current_state(TASK_UNINTERRUPTIBLE);
9535 +               add_wait_queue(&vnode->update_waitq,&myself);
9536 +
9537 +               /* wait for the status to be updated */
9538 +               for (;;) {
9539 +                       if (!(vnode->flags & AFS_VNODE_CHANGED))        break;
9540 +                       if (vnode->flags & AFS_VNODE_DELETED)           break;
9541 +
9542 +                       /* it got updated and then invalidated again, all before we saw it */
9543 +                       if (vnode->update_cnt==0) {
9544 +                               remove_wait_queue(&vnode->update_waitq,&myself);
9545 +                               set_current_state(TASK_RUNNING);
9546 +                               goto get_anyway;
9547 +                       }
9548 +
9549 +                       spin_unlock(&vnode->lock);
9550 +
9551 +                       schedule();
9552 +                       set_current_state(TASK_UNINTERRUPTIBLE);
9553 +
9554 +                       spin_lock(&vnode->lock);
9555 +               }
9556 +
9557 +               remove_wait_queue(&vnode->update_waitq,&myself);
9558 +               spin_unlock(&vnode->lock);
9559 +               set_current_state(TASK_RUNNING);
9560 +
9561 +               return vnode->flags & AFS_VNODE_DELETED ? -ENOENT : 0;
9562 +       }
9563 +
9564 + get_anyway:
9565 +       /* okay... we're going to have to initiate the op */
9566 +       vnode->update_cnt++;
9567 +
9568 +       spin_unlock(&vnode->lock);
9569 +
9570 +       /* merge AFS status fetches and clear outstanding callback on this vnode */
9571 +       do {
9572 +               /* pick a server to query */
9573 +               ret = afs_volume_pick_fileserver(vnode->volume,&server);
9574 +               if (ret<0)
9575 +                       return ret;
9576 +
9577 +               _debug("USING SERVER: %08x\n",ntohl(server->addr.s_addr));
9578 +
9579 +               ret = afs_rxfs_fetch_file_status(server,vnode,NULL);
9580 +
9581 +       } while (!afs_volume_release_fileserver(vnode->volume,server,ret));
9582 +
9583 +       /* adjust the flags */
9584 +       afs_vnode_finalise_status_update(vnode,server,ret);
9585 +
9586 +       _leave(" = %d",ret);
9587 +       return ret;
9588 +} /* end afs_vnode_fetch_status() */
9589 +
9590 +/*****************************************************************************/
9591 +/*
9592 + * fetch file data from the volume
9593 + * - TODO implement caching and server failover
9594 + */
9595 +int afs_vnode_fetch_data(afs_vnode_t *vnode, struct afs_rxfs_fetch_descriptor *desc)
9596 +{
9597 +       afs_server_t *server;
9598 +       int ret;
9599 +
9600 +       _enter("%s,{%u,%u,%u}",
9601 +              vnode->volume->vlocation->vldb.name,
9602 +              vnode->fid.vid,
9603 +              vnode->fid.vnode,
9604 +              vnode->fid.unique);
9605 +
9606 +       /* this op will fetch the status */
9607 +       spin_lock(&vnode->lock);
9608 +       vnode->update_cnt++;
9609 +       spin_unlock(&vnode->lock);
9610 +
9611 +       /* merge in AFS status fetches and clear outstanding callback on this vnode */
9612 +       do {
9613 +               /* pick a server to query */
9614 +               ret = afs_volume_pick_fileserver(vnode->volume,&server);
9615 +               if (ret<0)
9616 +                       return ret;
9617 +
9618 +               _debug("USING SERVER: %08x\n",ntohl(server->addr.s_addr));
9619 +
9620 +               ret = afs_rxfs_fetch_file_data(server,vnode,desc,NULL);
9621 +
9622 +       } while (!afs_volume_release_fileserver(vnode->volume,server,ret));
9623 +
9624 +       /* adjust the flags */
9625 +       afs_vnode_finalise_status_update(vnode,server,ret);
9626 +
9627 +       _leave(" = %d",ret);
9628 +       return ret;
9629 +
9630 +} /* end afs_vnode_fetch_data() */
9631 +
9632 +/*****************************************************************************/
9633 +/*
9634 + * break any outstanding callback on a vnode
9635 + * - only relevant to the server that issued it
9636 + */
9637 +int afs_vnode_give_up_callback(afs_vnode_t *vnode)
9638 +{
9639 +       afs_server_t *server;
9640 +       int ret;
9641 +
9642 +       _enter("%s,{%u,%u,%u}",
9643 +              vnode->volume->vlocation->vldb.name,
9644 +              vnode->fid.vid,
9645 +              vnode->fid.vnode,
9646 +              vnode->fid.unique);
9647 +
9648 +       spin_lock(&afs_cb_hash_lock);
9649 +       list_del_init(&vnode->cb_hash_link);
9650 +       spin_unlock(&afs_cb_hash_lock);
9651 +
9652 +       /* set the changed flag in the vnode and release the server */
9653 +       spin_lock(&vnode->lock);
9654 +
9655 +       afs_kafstimod_del_timer(&vnode->cb_timeout);
9656 +
9657 +       server = xchg(&vnode->cb_server,NULL);
9658 +       if (server) {
9659 +               vnode->flags |= AFS_VNODE_CHANGED;
9660 +
9661 +               spin_lock(&server->cb_lock);
9662 +               list_del_init(&vnode->cb_link);
9663 +               spin_unlock(&server->cb_lock);
9664 +       }
9665 +
9666 +       spin_unlock(&vnode->lock);
9667 +
9668 +       ret = 0;
9669 +       if (server) {
9670 +               ret = afs_rxfs_give_up_callback(server,vnode);
9671 +               afs_put_server(server);
9672 +       }
9673 +
9674 +       _leave(" = %d",ret);
9675 +       return ret;
9676 +} /* end afs_vnode_give_up_callback() */
9677 diff -urNp linux-5240/fs/afs/vnode.h linux-5250/fs/afs/vnode.h
9678 --- linux-5240/fs/afs/vnode.h   1970-01-01 01:00:00.000000000 +0100
9679 +++ linux-5250/fs/afs/vnode.h   
9680 @@ -0,0 +1,88 @@
9681 +/* vnode.h: AFS vnode record
9682 + *
9683 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
9684 + * Written by David Howells (dhowells@redhat.com)
9685 + *
9686 + * This program is free software; you can redistribute it and/or
9687 + * modify it under the terms of the GNU General Public License
9688 + * as published by the Free Software Foundation; either version
9689 + * 2 of the License, or (at your option) any later version.
9690 + */
9691 +
9692 +#ifndef _LINUX_AFS_VNODE_H
9693 +#define _LINUX_AFS_VNODE_H
9694 +
9695 +#include <linux/fs.h>
9696 +#include <linux/version.h>
9697 +#include "server.h"
9698 +#include "kafstimod.h"
9699 +
9700 +#ifdef __KERNEL__
9701 +
9702 +struct afs_rxfs_fetch_descriptor;
9703 +
9704 +/*****************************************************************************/
9705 +/*
9706 + * AFS inode private data
9707 + */
9708 +struct afs_vnode
9709 +{
9710 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
9711 +       struct inode            vfs_inode;      /* the VFS's inode record */
9712 +#else
9713 +       struct inode            *inode;         /* the VFS's inode */
9714 +#endif
9715 +
9716 +       afs_volume_t            *volume;        /* volume on which vnode resides */
9717 +       afs_fid_t               fid;            /* the file identifier for this inode */
9718 +       afs_file_status_t       status;         /* AFS status info for this file */
9719 +       unsigned                nix;            /* vnode index in cache */
9720 +
9721 +       wait_queue_head_t       update_waitq;   /* status fetch waitqueue */
9722 +       unsigned                update_cnt;     /* number of outstanding ops that will update the
9723 +                                                * status */
9724 +       spinlock_t              lock;           /* waitqueue/flags lock */
9725 +       unsigned                flags;
9726 +#define AFS_VNODE_CHANGED      0x00000001      /* set if vnode reported changed by callback */
9727 +#define AFS_VNODE_DELETED      0x00000002      /* set if vnode deleted on server */
9728 +#define AFS_VNODE_MOUNTPOINT   0x00000004      /* set if vnode is a mountpoint symlink */
9729 +
9730 +       /* outstanding callback notification on this file */
9731 +       afs_server_t            *cb_server;     /* server that made the current promise */
9732 +       struct list_head        cb_link;        /* link in server's promises list */
9733 +       struct list_head        cb_hash_link;   /* link in master callback hash */
9734 +       afs_timer_t             cb_timeout;     /* timeout on promise */
9735 +       unsigned                cb_version;     /* callback version */
9736 +       unsigned                cb_expiry;      /* callback expiry time */
9737 +       afs_callback_type_t     cb_type;        /* type of callback */
9738 +};
9739 +
9740 +static inline afs_vnode_t *AFS_FS_I(struct inode *inode)
9741 +{
9742 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
9743 +       return list_entry(inode,afs_vnode_t,vfs_inode);
9744 +#else
9745 +       return inode->u.generic_ip;
9746 +#endif
9747 +}
9748 +
9749 +static inline struct inode *AFS_VNODE_TO_I(afs_vnode_t *vnode)
9750 +{
9751 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
9752 +       return &vnode->vfs_inode;
9753 +#else
9754 +       return vnode->inode;
9755 +#endif
9756 +}
9757 +
9758 +extern int afs_vnode_fetch_status(afs_vnode_t *vnode);
9759 +
9760 +extern int afs_vnode_fetch_data(afs_vnode_t *vnode, struct afs_rxfs_fetch_descriptor *desc);
9761 +
9762 +extern int afs_vnode_give_up_callback(afs_vnode_t *vnode);
9763 +
9764 +extern struct afs_timer_ops afs_vnode_cb_timed_out_ops;
9765 +
9766 +#endif /* __KERNEL__ */
9767 +
9768 +#endif /* _LINUX_AFS_VNODE_H */
9769 diff -urNp linux-5240/fs/afs/volume.c linux-5250/fs/afs/volume.c
9770 --- linux-5240/fs/afs/volume.c  1970-01-01 01:00:00.000000000 +0100
9771 +++ linux-5250/fs/afs/volume.c  
9772 @@ -0,0 +1,434 @@
9773 +/* volume.c: AFS volume management
9774 + *
9775 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
9776 + * Written by David Howells (dhowells@redhat.com)
9777 + *
9778 + * This program is free software; you can redistribute it and/or
9779 + * modify it under the terms of the GNU General Public License
9780 + * as published by the Free Software Foundation; either version
9781 + * 2 of the License, or (at your option) any later version.
9782 + */
9783 +
9784 +#include <linux/kernel.h>
9785 +#include <linux/module.h>
9786 +#include <linux/init.h>
9787 +#include <linux/slab.h>
9788 +#include <linux/fs.h>
9789 +#include <linux/pagemap.h>
9790 +#include "volume.h"
9791 +#include "cell.h"
9792 +#include "cmservice.h"
9793 +#include "fsclient.h"
9794 +#include "vlclient.h"
9795 +#include "cache.h"
9796 +#include "internal.h"
9797 +
9798 +const char *afs_voltypes[] = { "R/W", "R/O", "BAK" };
9799 +
9800 +/*****************************************************************************/
9801 +/*
9802 + * lookup a volume by name
9803 + * - this can be one of the following:
9804 + *     "%[cell:]volume[.]"             R/W volume
9805 + *     "#[cell:]volume[.]"             R/O or R/W volume (rwparent=0), or R/W (rwparent=1) volume
9806 + *     "%[cell:]volume.readonly"       R/O volume
9807 + *     "#[cell:]volume.readonly"       R/O volume
9808 + *     "%[cell:]volume.backup"         Backup volume
9809 + *     "#[cell:]volume.backup"         Backup volume
9810 + *
9811 + * The cell name is optional, and defaults to the current cell.
9812 + *
9813 + * See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin Guide
9814 + * - Rule 1: Explicit type suffix forces access of that type or nothing
9815 + *           (no suffix, then use Rule 2 & 3)
9816 + * - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W if not available
9817 + * - Rule 3: If parent volume is R/W, then only mount R/W volume unless explicitly told otherwise
9818 + */
9819 +int afs_volume_lookup(afs_cache_t *cache, char *name, int rwparent, afs_volume_t **_volume)
9820 +{
9821 +       afs_vlocation_t *vlocation = NULL;
9822 +       afs_voltype_t type;
9823 +       afs_volume_t *volume = NULL;
9824 +       afs_cell_t *cell = NULL;
9825 +       char *cellname, *volname, *suffix;
9826 +       char srvtmask;
9827 +       int force, ret, loop;
9828 +
9829 +       _enter(",%s,",name);
9830 +
9831 +       if (!name || (name[0]!='%' && name[0]!='#') || !name[1]) {
9832 +               printk("kAFS: unparsable volume name\n");
9833 +               return -EINVAL;
9834 +       }
9835 +
9836 +       /* determine the type of volume we're looking for */
9837 +       force = 0;
9838 +       type = AFSVL_ROVOL;
9839 +
9840 +       if (rwparent || name[0]=='%') {
9841 +               type = AFSVL_RWVOL;
9842 +               force = 1;
9843 +       }
9844 +
9845 +       suffix = strrchr(name,'.');
9846 +       if (suffix) {
9847 +               if (strcmp(suffix,".readonly")==0) {
9848 +                       type = AFSVL_ROVOL;
9849 +                       force = 1;
9850 +               }
9851 +               else if (strcmp(suffix,".backup")==0) {
9852 +                       type = AFSVL_BACKVOL;
9853 +                       force = 1;
9854 +               }
9855 +               else if (suffix[1]==0) {
9856 +                       *suffix = 0;
9857 +                       suffix = NULL;
9858 +               }
9859 +               else {
9860 +                       suffix = NULL;
9861 +               }
9862 +       }
9863 +
9864 +       /* split the cell and volume names */
9865 +       name++;
9866 +       volname = strchr(name,':');
9867 +       if (volname) {
9868 +               *volname++ = 0;
9869 +               cellname = name;
9870 +       }
9871 +       else {
9872 +               volname = name;
9873 +               cellname = NULL;
9874 +       }
9875 +
9876 +       _debug("CELL:%s VOLUME:%s SUFFIX:%s TYPE:%d%s",
9877 +              cellname,volname,suffix?:"-",type,force?" FORCE":"");
9878 +
9879 +       /* lookup the cell record */
9880 +       ret = afs_cell_lookup(cache,cellname,&cell);
9881 +       if (ret<0)
9882 +               printk("kAFS: unable to lookup cell '%s'\n",cellname?:"");
9883 +
9884 +       if (cellname) volname[-1] = ':';
9885 +       if (ret<0)
9886 +               goto error;
9887 +
9888 +       /* lookup the volume location record */
9889 +       if (suffix) *suffix = 0;
9890 +       ret = afs_vlocation_lookup(cache,cell,volname,&vlocation);
9891 +       if (suffix) *suffix = '.';
9892 +       if (ret<0)
9893 +               goto error;
9894 +
9895 +       /* make the final decision on the type we want */
9896 +       ret = -ENOMEDIUM;
9897 +       if (force && !(vlocation->vldb.vidmask & (1<<type)))
9898 +               goto error;
9899 +
9900 +       srvtmask = 0;
9901 +       for (loop=0; loop<vlocation->vldb.nservers; loop++)
9902 +               srvtmask |= vlocation->vldb.srvtmask[loop];
9903 +
9904 +       if (force) {
9905 +               if (!(srvtmask & (1 <<type)))
9906 +                       goto error;
9907 +       }
9908 +       else if (srvtmask & AFS_CACHE_VOL_STM_RO) {
9909 +               type = AFSVL_ROVOL;
9910 +       }
9911 +       else if (srvtmask & AFS_CACHE_VOL_STM_RW) {
9912 +               type = AFSVL_RWVOL;
9913 +       }
9914 +       else {
9915 +               goto error;
9916 +       }
9917 +
9918 +       down_write(&cell->vl_sem);
9919 +
9920 +       /* is the volume already active? */
9921 +       if (vlocation->vols[type]) {
9922 +               /* yes - re-use it */
9923 +               volume = vlocation->vols[type];
9924 +               afs_get_volume(volume);
9925 +               goto success;
9926 +       }
9927 +
9928 +       /* create a new volume record */
9929 +       _debug("creating new volume record");
9930 +
9931 +       ret = -ENOMEM;
9932 +       volume = kmalloc(sizeof(afs_volume_t),GFP_KERNEL);
9933 +       if (!volume)
9934 +               goto error_up;
9935 +
9936 +       memset(volume,0,sizeof(afs_volume_t));
9937 +       atomic_set(&volume->usage,1);
9938 +       volume->type = type;
9939 +       volume->type_force = force;
9940 +       volume->cell = cell;
9941 +       volume->cix = cell->cache_ix;
9942 +       volume->vid = vlocation->vldb.vid[type];
9943 +
9944 +       volume->vix.index = (vlocation->vix.index << 2) | type;
9945 +
9946 +       init_rwsem(&volume->server_sem);
9947 +
9948 +       /* look up all the applicable server records */
9949 +       for (loop=0; loop<8; loop++) {
9950 +               if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) {
9951 +                       ret = afs_server_lookup(volume->cell,
9952 +                                               &vlocation->vldb.servers[loop],
9953 +                                               &volume->servers[volume->nservers]);
9954 +                       if (ret<0)
9955 +                               goto error_discard;
9956 +
9957 +                       volume->nservers++;
9958 +               }
9959 +       }
9960 +
9961 +       /* attach the cache and volume location */
9962 +#if 0
9963 +       afs_get_cache(cache);           volume->cache = cache;
9964 +#endif
9965 +       afs_get_vlocation(vlocation);   volume->vlocation = vlocation;
9966 +
9967 +       vlocation->vols[type] = volume;
9968 +
9969 + success:
9970 +       _debug("kAFS selected %s volume %08x",afs_voltypes[volume->type],volume->vid);
9971 +       *_volume = volume;
9972 +       ret = 0;
9973 +
9974 +       /* clean up */
9975 + error_up:
9976 +       up_write(&cell->vl_sem);
9977 + error:
9978 +       if (vlocation)  afs_put_vlocation(vlocation);
9979 +       if (cell)       afs_put_cell(cell);
9980 +
9981 +       _leave(" = %d (%p)",ret,volume);
9982 +       return ret;
9983 +
9984 + error_discard:
9985 +       up_write(&cell->vl_sem);
9986 +
9987 +       for (loop=volume->nservers-1; loop>=0; loop--)
9988 +               if (volume->servers[loop])
9989 +                       afs_put_server(volume->servers[loop]);
9990 +
9991 +       kfree(volume);
9992 +       goto error;
9993 +} /* end afs_volume_lookup() */
9994 +
9995 +/*****************************************************************************/
9996 +/*
9997 + * destroy a volume record
9998 + */
9999 +void afs_put_volume(afs_volume_t *volume)
10000 +{
10001 +       afs_vlocation_t *vlocation;
10002 +       int loop;
10003 +
10004 +       _enter("%p",volume);
10005 +
10006 +       vlocation = volume->vlocation;
10007 +
10008 +       /* sanity check */
10009 +       if (atomic_read(&volume->usage)<=0)
10010 +               BUG();
10011 +
10012 +       /* to prevent a race, the decrement and the dequeue must be effectively atomic */
10013 +       down_write(&vlocation->cell->vl_sem);
10014 +
10015 +       if (likely(!atomic_dec_and_test(&volume->usage))) {
10016 +               up_write(&vlocation->cell->vl_sem);
10017 +               _leave("");
10018 +               return;
10019 +       }
10020 +
10021 +       vlocation->vols[volume->type] = NULL;
10022 +
10023 +       up_write(&vlocation->cell->vl_sem);
10024 +
10025 +       afs_put_vlocation(vlocation);
10026 +
10027 +       /* finish cleaning up the volume */
10028 +#if 0
10029 +       if (volume->cache)      afs_put_cache(volume->cache);
10030 +#endif
10031 +
10032 +       for (loop=volume->nservers-1; loop>=0; loop--)
10033 +               if (volume->servers[loop])
10034 +                       afs_put_server(volume->servers[loop]);
10035 +
10036 +       kfree(volume);
10037 +
10038 +       _leave(" [destroyed]");
10039 +} /* end afs_put_volume() */
10040 +
10041 +/*****************************************************************************/
10042 +/*
10043 + * pick a server to use to try accessing this volume
10044 + * - returns with an elevated usage count on the server chosen
10045 + */
10046 +int afs_volume_pick_fileserver(afs_volume_t *volume, afs_server_t **_server)
10047 +{
10048 +       afs_server_t *server;
10049 +       int ret, state, loop;
10050 +
10051 +       _enter("%s",volume->vlocation->vldb.name);
10052 +
10053 +       down_read(&volume->server_sem);
10054 +
10055 +       /* handle the no-server case */
10056 +       if (volume->nservers==0) {
10057 +               ret = volume->rjservers ? -ENOMEDIUM : -ESTALE;
10058 +               up_read(&volume->server_sem);
10059 +               _leave(" = %d [no servers]",ret);
10060 +               return ret;
10061 +       }
10062 +
10063 +       /* basically, just search the list for the first live server and use that */
10064 +       ret = 0;
10065 +       for (loop=0; loop<volume->nservers; loop++) {
10066 +               server = volume->servers[loop];
10067 +               state = server->fs_state;
10068 +
10069 +               switch (state) {
10070 +                       /* found an apparently healthy server */
10071 +               case 0:
10072 +                       afs_get_server(server);
10073 +                       up_read(&volume->server_sem);
10074 +                       *_server = server;
10075 +                       _leave(" = 0 (picked %08x)",ntohl(server->addr.s_addr));
10076 +                       return 0;
10077 +
10078 +               case -ENETUNREACH:
10079 +                       if (ret==0)
10080 +                               ret = state;
10081 +                       break;
10082 +
10083 +               case -EHOSTUNREACH:
10084 +                       if (ret==0 || ret==-ENETUNREACH)
10085 +                               ret = state;
10086 +                       break;
10087 +
10088 +               case -ECONNREFUSED:
10089 +                       if (ret==0 || ret==-ENETUNREACH || ret==-EHOSTUNREACH)
10090 +                               ret = state;
10091 +                       break;
10092 +
10093 +               default:
10094 +               case -EREMOTEIO:
10095 +                       if (ret==0 ||
10096 +                           ret==-ENETUNREACH ||
10097 +                           ret==-EHOSTUNREACH ||
10098 +                           ret==-ECONNREFUSED)
10099 +                               ret = state;
10100 +                       break;
10101 +               }
10102 +       }
10103 +
10104 +       /* no available servers
10105 +        * - TODO: handle the no active servers case better
10106 +        */
10107 +       up_read(&volume->server_sem);
10108 +       _leave(" = %d",ret);
10109 +       return ret;
10110 +} /* end afs_volume_pick_fileserver() */
10111 +
10112 +/*****************************************************************************/
10113 +/*
10114 + * release a server after use
10115 + * - releases the ref on the server struct that was acquired by picking
10116 + * - records result of using a particular server to access a volume
10117 + * - return 0 to try again, 1 if okay or to issue error
10118 + */
10119 +int afs_volume_release_fileserver(afs_volume_t *volume, afs_server_t *server, int result)
10120 +{
10121 +       unsigned loop;
10122 +
10123 +       _enter("%s,%08x,%d",volume->vlocation->vldb.name,ntohl(server->addr.s_addr),result);
10124 +
10125 +       switch (result) {
10126 +               /* success */
10127 +       case 0:
10128 +               server->fs_act_jif = jiffies;
10129 +               break;
10130 +
10131 +               /* the fileserver denied all knowledge of the volume */
10132 +       case -ENOMEDIUM:
10133 +               server->fs_act_jif = jiffies;
10134 +               down_write(&volume->server_sem);
10135 +
10136 +               /* first, find where the server is in the active list (if it is present) */
10137 +               for (loop=0; loop<volume->nservers; loop++)
10138 +                       if (volume->servers[loop]==server)
10139 +                               goto present;
10140 +
10141 +               /* no longer there - may have been discarded by another op */
10142 +               goto try_next_server_upw;
10143 +
10144 +       present:
10145 +               volume->nservers--;
10146 +               memmove(&volume->servers[loop],
10147 +                       &volume->servers[loop+1],
10148 +                       sizeof(volume->servers[loop]) * (volume->nservers - loop)
10149 +                       );
10150 +               volume->servers[volume->nservers] = NULL;
10151 +               afs_put_server(server);
10152 +               volume->rjservers++;
10153 +
10154 +               if (volume->nservers>0)
10155 +                       /* another server might acknowledge its existence */
10156 +                       goto try_next_server_upw;
10157 +
10158 +               /* handle the case where all the fileservers have rejected the volume
10159 +                * - TODO: try asking the fileservers for volume information
10160 +                * - TODO: contact the VL server again to see if the volume is no longer registered
10161 +                */
10162 +               up_write(&volume->server_sem);
10163 +               afs_put_server(server);
10164 +               _leave(" [completely rejected]");
10165 +               return 1;
10166 +
10167 +               /* problem reaching the server */
10168 +       case -ENETUNREACH:
10169 +       case -EHOSTUNREACH:
10170 +       case -ECONNREFUSED:
10171 +       case -ETIMEDOUT:
10172 +       case -EREMOTEIO:
10173 +               /* mark the server as dead
10174 +                * TODO: vary dead timeout depending on error
10175 +                */
10176 +               spin_lock(&server->fs_lock);
10177 +               if (!server->fs_state) {
10178 +                       server->fs_dead_jif = jiffies + HZ * 10;
10179 +                       server->fs_state = result;
10180 +                       printk("kAFS: SERVER DEAD state=%d\n",result);
10181 +               }
10182 +               spin_unlock(&server->fs_lock);
10183 +               goto try_next_server;
10184 +
10185 +               /* miscellaneous error */
10186 +       default:
10187 +               server->fs_act_jif = jiffies;
10188 +       case -ENOMEM:
10189 +       case -ENONET:
10190 +               break;
10191 +       }
10192 +
10193 +       /* tell the caller to accept the result */
10194 +       afs_put_server(server);
10195 +       _leave("");
10196 +       return 1;
10197 +
10198 +       /* tell the caller to loop around and try the next server */
10199 + try_next_server_upw:
10200 +       up_write(&volume->server_sem);
10201 + try_next_server:
10202 +       afs_put_server(server);
10203 +       _leave(" [try next server]");
10204 +       return 0;
10205 +
10206 +} /* end afs_volume_release_fileserver() */
10207 diff -urNp linux-5240/fs/afs/volume.h linux-5250/fs/afs/volume.h
10208 --- linux-5240/fs/afs/volume.h  1970-01-01 01:00:00.000000000 +0100
10209 +++ linux-5250/fs/afs/volume.h  
10210 @@ -0,0 +1,102 @@
10211 +/* volume.h: AFS volume management
10212 + *
10213 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10214 + * Written by David Howells (dhowells@redhat.com)
10215 + *
10216 + * This program is free software; you can redistribute it and/or
10217 + * modify it under the terms of the GNU General Public License
10218 + * as published by the Free Software Foundation; either version
10219 + * 2 of the License, or (at your option) any later version.
10220 + */
10221 +
10222 +#ifndef _LINUX_AFS_VOLUME_H
10223 +#define _LINUX_AFS_VOLUME_H
10224 +
10225 +#include "types.h"
10226 +#include "cache-layout.h"
10227 +#include "fsclient.h"
10228 +#include "kafstimod.h"
10229 +#include "kafsasyncd.h"
10230 +
10231 +#define __packed __attribute__((packed))
10232 +
10233 +typedef enum {
10234 +       AFS_VLUPD_SLEEP,                /* sleeping waiting for update timer to fire */
10235 +       AFS_VLUPD_PENDING,              /* on pending queue */
10236 +       AFS_VLUPD_INPROGRESS,           /* op in progress */
10237 +       AFS_VLUPD_BUSYSLEEP,            /* sleeping because server returned EBUSY */
10238 +       
10239 +} __attribute__((packed)) afs_vlocation_upd_t;
10240 +
10241 +/*****************************************************************************/
10242 +/*
10243 + * AFS volume location record
10244 + */
10245 +struct afs_vlocation
10246 +{
10247 +       atomic_t                usage;
10248 +       struct list_head        link;           /* link in cell volume location list */
10249 +       afs_timer_t             timeout;        /* decaching timer */
10250 +       afs_cell_t              *cell;          /* cell to which volume belongs */
10251 +#if 0
10252 +       afs_cache_t             *cache;         /* backing cache */
10253 +#endif
10254 +       afs_cache_volix_t       vix;            /* volume index in this cache */
10255 +       struct afs_cache_volume vldb;           /* volume information DB record */
10256 +       struct afs_volume       *vols[3];       /* volume access record pointer (index by type) */
10257 +       rwlock_t                lock;           /* access lock */
10258 +       unsigned long           read_jif;       /* time at which last read from vlserver */
10259 +       afs_timer_t             upd_timer;      /* update timer */
10260 +       afs_async_op_t          upd_op;         /* update operation */
10261 +       afs_vlocation_upd_t     upd_state;      /* update state */
10262 +       unsigned short          upd_first_svix; /* first server index during update */
10263 +       unsigned short          upd_curr_svix;  /* current server index during update */
10264 +       unsigned short          upd_rej_cnt;    /* ENOMEDIUM count during update */
10265 +       unsigned short          upd_busy_cnt;   /* EBUSY count during update */
10266 +       unsigned short          valid;          /* T if valid */
10267 +};
10268 +
10269 +extern int afs_vlocation_lookup(afs_cache_t *cache, afs_cell_t *cell, const char *name,
10270 +                               afs_vlocation_t **_vlocation);
10271 +
10272 +#define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0)
10273 +
10274 +extern void __afs_put_vlocation(afs_vlocation_t *vlocation);
10275 +extern void afs_put_vlocation(afs_vlocation_t *vlocation);
10276 +extern void afs_vlocation_do_timeout(afs_vlocation_t *vlocation);
10277 +
10278 +/*****************************************************************************/
10279 +/*
10280 + * AFS volume access record
10281 + */
10282 +struct afs_volume
10283 +{
10284 +       atomic_t                usage;
10285 +       afs_cell_t              *cell;          /* cell to which belongs (unrefd ptr) */
10286 +       afs_vlocation_t         *vlocation;     /* volume location */
10287 +       afs_volid_t             vid;            /* volume ID */
10288 +       afs_voltype_t __packed  type;           /* type of volume */
10289 +       char                    type_force;     /* force volume type (suppress R/O -> R/W) */
10290 +#if 0
10291 +       afs_cache_t             *cache;         /* backing cache */
10292 +#endif
10293 +       afs_cache_cellix_t      cix;            /* cell index in this cache */
10294 +       afs_cache_volix_t       vix;            /* volume index in this cache */
10295 +
10296 +       unsigned short          nservers;       /* number of server slots filled */
10297 +       unsigned short          rjservers;      /* number of servers discarded due to -ENOMEDIUM */
10298 +       afs_server_t            *servers[8];    /* servers on which volume resides (ordered) */
10299 +       struct rw_semaphore     server_sem;     /* lock for accessing current server */
10300 +};
10301 +
10302 +extern int afs_volume_lookup(afs_cache_t *cache, char *name, int ro, afs_volume_t **_volume);
10303 +
10304 +#define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0)
10305 +
10306 +extern void afs_put_volume(afs_volume_t *volume);
10307 +
10308 +extern int afs_volume_pick_fileserver(afs_volume_t *volume, afs_server_t **_server);
10309 +
10310 +extern int afs_volume_release_fileserver(afs_volume_t *volume, afs_server_t *server, int result);
10311 +
10312 +#endif /* _LINUX_AFS_VOLUME_H */
10313 diff -urNp linux-5240/fs/Config.in linux-5250/fs/Config.in
10314 --- linux-5240/fs/Config.in     
10315 +++ linux-5250/fs/Config.in     
10316 @@ -23,6 +23,7 @@ dep_mbool '  Enable reiserfs debug mode'
10317  dep_mbool '  Stats in /proc/fs/reiserfs' CONFIG_REISERFS_PROC_INFO $CONFIG_REISERFS_FS
10318  
10319  dep_tristate 'ADFS file system support (EXPERIMENTAL)' CONFIG_ADFS_FS $CONFIG_EXPERIMENTAL
10320 +dep_tristate 'AFS distributed file system support' CONFIG_AFS_FS $CONFIG_EXPERIMENTAL
10321  dep_mbool '  ADFS write support (DANGEROUS)' CONFIG_ADFS_FS_RW $CONFIG_ADFS_FS $CONFIG_EXPERIMENTAL
10322  
10323  dep_tristate 'Amiga FFS file system support (EXPERIMENTAL)' CONFIG_AFFS_FS $CONFIG_EXPERIMENTAL
10324 diff -urNp linux-5240/fs/Makefile linux-5250/fs/Makefile
10325 --- linux-5240/fs/Makefile      
10326 +++ linux-5250/fs/Makefile      
10327 @@ -67,6 +67,7 @@ subdir-$(CONFIG_UDF_FS)               += udf
10328  subdir-$(CONFIG_AUTOFS_FS)     += autofs
10329  subdir-$(CONFIG_AUTOFS4_FS)    += autofs4
10330  subdir-$(CONFIG_ADFS_FS)       += adfs
10331 +subdir-$(CONFIG_AFS_FS)                += afs
10332  subdir-$(CONFIG_REISERFS_FS)   += reiserfs
10333  subdir-$(CONFIG_DEVPTS_FS)     += devpts
10334  subdir-$(CONFIG_SUN_OPENPROMFS)        += openpromfs
10335 diff -urNp linux-5240/include/rxrpc/call.h linux-5250/include/rxrpc/call.h
10336 --- linux-5240/include/rxrpc/call.h     1970-01-01 01:00:00.000000000 +0100
10337 +++ linux-5250/include/rxrpc/call.h     
10338 @@ -0,0 +1,218 @@
10339 +/* call.h: Rx call record
10340 + *
10341 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10342 + * Written by David Howells (dhowells@redhat.com)
10343 + *
10344 + * This program is free software; you can redistribute it and/or
10345 + * modify it under the terms of the GNU General Public License
10346 + * as published by the Free Software Foundation; either version
10347 + * 2 of the License, or (at your option) any later version.
10348 + */
10349 +
10350 +#ifndef _LINUX_RXRPC_CALL_H
10351 +#define _LINUX_RXRPC_CALL_H
10352 +
10353 +#include <rxrpc/types.h>
10354 +#include <rxrpc/rxrpc.h>
10355 +#include <rxrpc/packet.h>
10356 +#include <linux/timer.h>
10357 +
10358 +#define RXRPC_CALL_ACK_WINDOW_SIZE     16
10359 +
10360 +extern unsigned rxrpc_call_rcv_timeout;                /* receive activity timeout (secs) */
10361 +extern unsigned rxrpc_call_acks_timeout;       /* pending ACK (retransmit) timeout (secs) */
10362 +extern unsigned rxrpc_call_dfr_ack_timeout;    /* deferred ACK timeout (secs) */
10363 +extern unsigned short rxrpc_call_max_resend;           /* maximum consecutive resend count */
10364 +
10365 +/* application call state
10366 + * - only state 0 and ffff are reserved, the state is set to 1 after an opid is received
10367 + */
10368 +enum rxrpc_app_cstate {
10369 +       RXRPC_CSTATE_COMPLETE           = 0,    /* operation complete */
10370 +       RXRPC_CSTATE_ERROR,                     /* operation ICMP error or aborted */
10371 +       RXRPC_CSTATE_SRVR_RCV_OPID,             /* [SERVER] receiving operation ID */
10372 +       RXRPC_CSTATE_SRVR_RCV_ARGS,             /* [SERVER] receiving operation data */
10373 +       RXRPC_CSTATE_SRVR_GOT_ARGS,             /* [SERVER] completely received operation data */
10374 +       RXRPC_CSTATE_SRVR_SND_REPLY,            /* [SERVER] sending operation reply */
10375 +       RXRPC_CSTATE_SRVR_RCV_FINAL_ACK,        /* [SERVER] receiving final ACK */
10376 +       RXRPC_CSTATE_CLNT_SND_ARGS,             /* [CLIENT] sending operation args */
10377 +       RXRPC_CSTATE_CLNT_RCV_REPLY,            /* [CLIENT] receiving operation reply */
10378 +       RXRPC_CSTATE_CLNT_GOT_REPLY,            /* [CLIENT] completely received operation reply */
10379 +} __attribute__((packed));
10380 +
10381 +extern const char *rxrpc_call_states[];
10382 +
10383 +enum rxrpc_app_estate {
10384 +       RXRPC_ESTATE_NO_ERROR           = 0,    /* no error */
10385 +       RXRPC_ESTATE_LOCAL_ABORT,               /* aborted locally by application layer */
10386 +       RXRPC_ESTATE_PEER_ABORT,                /* aborted remotely by peer */
10387 +       RXRPC_ESTATE_LOCAL_ERROR,               /* local ICMP network error */
10388 +       RXRPC_ESTATE_REMOTE_ERROR,              /* remote ICMP network error */
10389 +} __attribute__((packed));
10390 +
10391 +extern const char *rxrpc_call_error_states[];
10392 +
10393 +/*****************************************************************************/
10394 +/*
10395 + * Rx call record and application scratch buffer
10396 + * - the call record occupies the bottom of a complete page
10397 + * - the application scratch buffer occupies the rest
10398 + */
10399 +struct rxrpc_call
10400 +{
10401 +       atomic_t                usage;
10402 +       struct rxrpc_connection *conn;          /* connection upon which active */
10403 +       spinlock_t              lock;           /* access lock */
10404 +       struct module           *owner;         /* owner module */
10405 +       wait_queue_head_t       waitq;          /* wait queue for events to happen */
10406 +       struct list_head        link;           /* general internal list link */
10407 +       struct list_head        call_link;      /* master call list link */
10408 +       u32                     chan_ix;        /* connection channel index (net order) */
10409 +       u32                     call_id;        /* call ID on connection (net order) */
10410 +       unsigned long           cjif;           /* jiffies at call creation */
10411 +       unsigned long           flags;          /* control flags */
10412 +#define RXRPC_CALL_ACKS_TIMO   0x00000001      /* ACKS timeout reached */
10413 +#define RXRPC_CALL_ACKR_TIMO   0x00000002      /* ACKR timeout reached */
10414 +#define RXRPC_CALL_RCV_TIMO    0x00000004      /* RCV timeout reached */
10415 +#define RXRPC_CALL_RCV_PKT     0x00000008      /* received packet */
10416 +
10417 +       /* transmission */
10418 +       rxrpc_seq_t             snd_seq_count;  /* outgoing packet sequence number counter */
10419 +       struct rxrpc_message    *snd_nextmsg;   /* next message being constructed for sending */
10420 +       struct rxrpc_message    *snd_ping;      /* last ping message sent */
10421 +       unsigned short          snd_resend_cnt; /* count of resends since last ACK */
10422 +
10423 +       /* transmission ACK tracking */
10424 +       struct list_head        acks_pendq;     /* messages pending ACK (ordered by seq) */
10425 +       unsigned                acks_pend_cnt;  /* number of un-ACK'd packets */
10426 +       rxrpc_seq_t             acks_dftv_seq;  /* highest definitively ACK'd msg seq */
10427 +       struct timer_list       acks_timeout;   /* timeout on expected ACK */
10428 +
10429 +       /* reception */
10430 +       struct list_head        rcv_receiveq;   /* messages pending reception (ordered by seq) */
10431 +       struct list_head        rcv_krxiodq_lk; /* krxiod queue for new inbound packets */
10432 +       struct timer_list       rcv_timeout;    /* call receive activity timeout */
10433 +
10434 +       /* reception ACK'ing */
10435 +       rxrpc_seq_t             ackr_win_bot;   /* bottom of ACK window */
10436 +       rxrpc_seq_t             ackr_win_top;   /* top of ACK window */
10437 +       rxrpc_seq_t             ackr_high_seq;  /* highest seqno yet received */
10438 +       rxrpc_seq_t             ackr_prev_seq;  /* previous seqno received */
10439 +       unsigned                ackr_pend_cnt;  /* number of pending ACKs */
10440 +       struct timer_list       ackr_dfr_timo;  /* timeout on deferred ACK */
10441 +       char                    ackr_dfr_perm;  /* request for deferred ACKs permitted */
10442 +       rxrpc_seq_t             ackr_dfr_seq;   /* seqno for deferred ACK */
10443 +       struct rxrpc_ackpacket  ackr;           /* pending normal ACK packet */
10444 +       u8                      ackr_array[RXRPC_CALL_ACK_WINDOW_SIZE]; /* ACK records */
10445 +
10446 +       /* presentation layer */
10447 +       char                    app_last_rcv;   /* T if received last packet from remote end */
10448 +       enum rxrpc_app_cstate   app_call_state; /* call state */
10449 +       enum rxrpc_app_estate   app_err_state;  /* abort/error state */
10450 +       struct list_head        app_readyq;     /* ordered ready received packet queue */
10451 +       struct list_head        app_unreadyq;   /* ordered post-hole recv'd packet queue */
10452 +       rxrpc_seq_t             app_ready_seq;  /* last seq number dropped into readyq */
10453 +       size_t                  app_ready_qty;  /* amount of data ready in readyq */
10454 +       unsigned                app_opcode;     /* operation ID */
10455 +       unsigned                app_abort_code; /* abort code (when aborted) */
10456 +       int                     app_errno;      /* error number (when ICMP error received) */
10457 +
10458 +       /* statistics */
10459 +       unsigned                pkt_rcv_count;  /* count of received packets on this call */
10460 +       unsigned                pkt_snd_count;  /* count of sent packets on this call */
10461 +       unsigned                app_read_count; /* number of reads issued */
10462 +
10463 +       /* bits for the application to use */
10464 +       rxrpc_call_attn_func_t  app_attn_func;  /* callback when attention required */
10465 +       rxrpc_call_error_func_t app_error_func; /* callback when abort sent (cleanup and put) */
10466 +       rxrpc_call_aemap_func_t app_aemap_func; /* callback to map abort code to/from errno */
10467 +       void                    *app_user;      /* application data */
10468 +       struct list_head        app_link;       /* application list linkage */
10469 +       struct list_head        app_attn_link;  /* application attention list linkage */
10470 +       size_t                  app_mark;       /* trigger callback when app_ready_qty>=app_mark */
10471 +       char                    app_async_read; /* T if in async-read mode */
10472 +       u8                      *app_read_buf;  /* application async read buffer (app_mark size) */
10473 +       u8                      *app_scr_alloc; /* application scratch allocation pointer */
10474 +       void                    *app_scr_ptr;   /* application pointer into scratch buffer */
10475 +
10476 +#define RXRPC_APP_MARK_EOF 0xFFFFFFFFU /* mark at end of input */
10477 +
10478 +       /* application scratch buffer */
10479 +       u8              app_scratch[0] __attribute__((aligned(sizeof(long))));
10480 +};
10481 +
10482 +#define RXRPC_CALL_SCRATCH_SIZE (PAGE_SIZE - sizeof(struct rxrpc_call))
10483 +
10484 +#define rxrpc_call_reset_scratch(CALL) \
10485 +do { (CALL)->app_scr_alloc = (CALL)->app_scratch; } while(0)
10486 +
10487 +#define rxrpc_call_alloc_scratch(CALL,SIZE)                                            \
10488 +({                                                                                     \
10489 +       void *ptr;                                                                      \
10490 +       ptr = (CALL)->app_scr_alloc;                                                    \
10491 +       (CALL)->app_scr_alloc += (SIZE);                                                \
10492 +       if ((SIZE)>RXRPC_CALL_SCRATCH_SIZE ||                                           \
10493 +           (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) {  \
10494 +               printk("rxrpc_call_alloc_scratch(%p,%u)\n",(CALL),(SIZE));              \
10495 +               BUG();                                                                  \
10496 +       }                                                                               \
10497 +       ptr;                                                                            \
10498 +})
10499 +
10500 +#define rxrpc_call_alloc_scratch_s(CALL,TYPE)                                          \
10501 +({                                                                                     \
10502 +       size_t size = sizeof(TYPE);                                                     \
10503 +       TYPE *ptr;                                                                      \
10504 +       ptr = (TYPE*)(CALL)->app_scr_alloc;                                             \
10505 +       (CALL)->app_scr_alloc += size;                                                  \
10506 +       if (size>RXRPC_CALL_SCRATCH_SIZE ||                                             \
10507 +           (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) {  \
10508 +               printk("rxrpc_call_alloc_scratch_s(%p,%u)\n",(CALL),size);              \
10509 +               BUG();                                                                  \
10510 +       }                                                                               \
10511 +       ptr;                                                                            \
10512 +})
10513 +
10514 +#define rxrpc_call_is_ack_pending(CALL) ((CALL)->ackr.reason != 0)
10515 +
10516 +extern int rxrpc_create_call(struct rxrpc_connection *conn,
10517 +                            rxrpc_call_attn_func_t attn,
10518 +                            rxrpc_call_error_func_t error,
10519 +                            rxrpc_call_aemap_func_t aemap,
10520 +                            struct rxrpc_call **_call);
10521 +
10522 +extern int rxrpc_incoming_call(struct rxrpc_connection *conn,
10523 +                              struct rxrpc_message *msg,
10524 +                              struct rxrpc_call **_call);
10525 +
10526 +static inline void rxrpc_get_call(struct rxrpc_call *call)
10527 +{
10528 +       if (atomic_read(&call->usage)<=0)
10529 +               BUG();
10530 +       atomic_inc(&call->usage);
10531 +       /*printk("rxrpc_get_call(%p{u=%d})\n",(C),atomic_read(&(C)->usage));*/
10532 +}
10533 +
10534 +extern void rxrpc_put_call(struct rxrpc_call *call);
10535 +
10536 +extern void rxrpc_call_do_stuff(struct rxrpc_call *call);
10537 +
10538 +extern int rxrpc_call_abort(struct rxrpc_call *call, int error);
10539 +
10540 +#define RXRPC_CALL_READ_BLOCK  0x0001  /* block if not enough data and not yet EOF */
10541 +#define RXRPC_CALL_READ_ALL    0x0002  /* error if insufficient data received */
10542 +extern int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int flags);
10543 +
10544 +extern int rxrpc_call_write_data(struct rxrpc_call *call,
10545 +                                size_t sioc,
10546 +                                struct iovec siov[],
10547 +                                u8 rxhdr_flags,
10548 +                                int alloc_flags,
10549 +                                int dup_data,
10550 +                                size_t *size_sent);
10551 +
10552 +extern int rxrpc_call_flush(struct rxrpc_call *call);
10553 +
10554 +extern void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno);
10555 +
10556 +#endif /* _LINUX_RXRPC_CALL_H */
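
A minimal usage sketch of the client-side call API declared in call.h above. The connection set-up, the argument block, the 4-byte reply size and the meaning of the dup_data argument (assumed to request that the layer copy the supplied iovec data) are illustrative assumptions, not taken from the patch:

#include <rxrpc/call.h>
#include <rxrpc/packet.h>
#include <linux/slab.h>
#include <linux/uio.h>

/* sketch: issue one call on an existing connection and read a 4-byte reply */
static int example_simple_call(struct rxrpc_connection *conn)
{
	struct rxrpc_call *call;
	struct iovec iov[1];
	u32 param = htonl(1);		/* illustrative argument block */
	size_t sent;
	u32 reply;
	int ret;

	ret = rxrpc_create_call(conn, NULL, NULL, NULL, &call);	/* NULL = default callbacks */
	if (ret < 0)
		return ret;

	iov[0].iov_base = &param;
	iov[0].iov_len  = sizeof(param);
	ret = rxrpc_call_write_data(call, 1, iov, RXRPC_LAST_PACKET,
				    GFP_KERNEL, 1 /* dup_data */, &sent);
	if (ret < 0)
		goto out;

	/* block until the whole reply has been received */
	ret = rxrpc_call_read_data(call, &reply, sizeof(reply),
				   RXRPC_CALL_READ_BLOCK | RXRPC_CALL_READ_ALL);
 out:
	rxrpc_put_call(call);
	return ret;
}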
10557 diff -urNp linux-5240/include/rxrpc/connection.h linux-5250/include/rxrpc/connection.h
10558 --- linux-5240/include/rxrpc/connection.h       1970-01-01 01:00:00.000000000 +0100
10559 +++ linux-5250/include/rxrpc/connection.h       
10560 @@ -0,0 +1,83 @@
10561 +/* connection.h: Rx connection record
10562 + *
10563 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10564 + * Written by David Howells (dhowells@redhat.com)
10565 + *
10566 + * This program is free software; you can redistribute it and/or
10567 + * modify it under the terms of the GNU General Public License
10568 + * as published by the Free Software Foundation; either version
10569 + * 2 of the License, or (at your option) any later version.
10570 + */
10571 +
10572 +#ifndef _LINUX_RXRPC_CONNECTION_H
10573 +#define _LINUX_RXRPC_CONNECTION_H
10574 +
10575 +#include <rxrpc/types.h>
10576 +#include <rxrpc/krxtimod.h>
10577 +
10578 +struct sk_buff;
10579 +
10580 +/*****************************************************************************/
10581 +/*
10582 + * Rx connection
10583 + * - connections are matched by (rmt_port,rmt_addr,service_id,conn_id,clientflag)
10584 + * - connections only retain a refcount on the peer when they are active
10585 + * - connections with refcount==0 are inactive and reside in the peer's graveyard
10586 + */
10587 +struct rxrpc_connection
10588 +{
10589 +       atomic_t                usage;
10590 +       struct rxrpc_transport  *trans;         /* transport endpoint */
10591 +       struct rxrpc_peer       *peer;          /* peer from/to which connected */
10592 +       struct rxrpc_service    *service;       /* responsible service (inbound conns) */
10593 +       struct rxrpc_timer      timeout;        /* decaching timer */
10594 +       struct list_head        link;           /* link in peer's list */
10595 +       struct list_head        proc_link;      /* link in proc list */
10596 +       struct list_head        err_link;       /* link in ICMP error processing list */
10597 +       struct sockaddr_in      addr;           /* remote address */
10598 +       struct rxrpc_call       *channels[4];   /* channels (active calls) */
10599 +       wait_queue_head_t       chanwait;       /* wait for channel to become available */
10600 +       spinlock_t              lock;           /* access lock */
10601 +       struct timeval          atime;          /* last access time */
10602 +       size_t                  mtu_size;       /* MTU size for outbound messages */
10603 +       unsigned                call_counter;   /* call ID counter */
10604 +       rxrpc_serial_t          serial_counter; /* packet serial number counter */
10605 +
10606 +       /* the following should all be in net order */
10607 +       u32                     in_epoch;       /* peer's epoch */
10608 +       u32                     out_epoch;      /* my epoch */
10609 +       u32                     conn_id;        /* connection ID, appropriately shifted */
10610 +       u16                     service_id;     /* service ID */
10611 +       u8                      security_ix;    /* security ID */
10612 +       u8                      in_clientflag;  /* RXRPC_CLIENT_INITIATED if we are server */
10613 +       u8                      out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
10614 +};
10615 +
10616 +extern int rxrpc_create_connection(struct rxrpc_transport *trans,
10617 +                                  u16 port,
10618 +                                  u32 addr,
10619 +                                  unsigned short service_id,
10620 +                                  void *security,
10621 +                                  struct rxrpc_connection **_conn);
10622 +
10623 +extern int rxrpc_connection_lookup(struct rxrpc_peer *peer,
10624 +                                  struct rxrpc_message *msg,
10625 +                                  struct rxrpc_connection **_conn);
10626 +
10627 +static inline void rxrpc_get_connection(struct rxrpc_connection *conn)
10628 +{
10629 +       if (atomic_read(&conn->usage)<0)
10630 +               BUG();
10631 +       atomic_inc(&conn->usage);
10632 +       //printk("rxrpc_get_conn(%p{u=%d})\n",conn,atomic_read(&conn->usage));
10633 +}
10634 +
10635 +extern void rxrpc_put_connection(struct rxrpc_connection *conn);
10636 +
10637 +extern int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
10638 +                                         struct rxrpc_call *call,
10639 +                                         struct rxrpc_message *msg);
10640 +
10641 +extern void rxrpc_conn_handle_error(struct rxrpc_connection *conn, int local, int errno);
10642 +
10643 +#endif /* _LINUX_RXRPC_CONNECTION_H */
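
A rough sketch of the connection life cycle implied by connection.h; the port, address, service ID and byte-order expectations below are assumptions for illustration only:

#include <rxrpc/connection.h>
#include <linux/in.h>

static void example_conn_lifecycle(struct rxrpc_transport *trans, u32 addr_net)
{
	struct rxrpc_connection *conn;

	/* assumed: address and port are supplied in network byte order */
	if (rxrpc_create_connection(trans, htons(7000), addr_net, 52, NULL, &conn) < 0)
		return;

	/* ... create calls over conn->channels[] via rxrpc_create_call() ... */

	rxrpc_put_connection(conn);	/* refcount 0 parks the connection in the peer's graveyard */
}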
10644 diff -urNp linux-5240/include/rxrpc/krxiod.h linux-5250/include/rxrpc/krxiod.h
10645 --- linux-5240/include/rxrpc/krxiod.h   1970-01-01 01:00:00.000000000 +0100
10646 +++ linux-5250/include/rxrpc/krxiod.h   
10647 @@ -0,0 +1,27 @@
10648 +/* krxiod.h: Rx RPC I/O kernel thread interface
10649 + *
10650 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10651 + * Written by David Howells (dhowells@redhat.com)
10652 + *
10653 + * This program is free software; you can redistribute it and/or
10654 + * modify it under the terms of the GNU General Public License
10655 + * as published by the Free Software Foundation; either version
10656 + * 2 of the License, or (at your option) any later version.
10657 + */
10658 +
10659 +#ifndef _LINUX_RXRPC_KRXIOD_H
10660 +#define _LINUX_RXRPC_KRXIOD_H
10661 +
10662 +#include <rxrpc/types.h>
10663 +
10664 +extern int rxrpc_krxiod_init(void);
10665 +extern void rxrpc_krxiod_kill(void);
10666 +extern void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans);
10667 +extern void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans);
10668 +extern void rxrpc_krxiod_queue_peer(struct rxrpc_peer *peer);
10669 +extern void rxrpc_krxiod_dequeue_peer(struct rxrpc_peer *peer);
10670 +extern void rxrpc_krxiod_clear_peers(struct rxrpc_transport *trans);
10671 +extern void rxrpc_krxiod_queue_call(struct rxrpc_call *call);
10672 +extern void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call);
10673 +
10674 +#endif /* _LINUX_RXRPC_KRXIOD_H */
10675 diff -urNp linux-5240/include/rxrpc/krxsecd.h linux-5250/include/rxrpc/krxsecd.h
10676 --- linux-5240/include/rxrpc/krxsecd.h  1970-01-01 01:00:00.000000000 +0100
10677 +++ linux-5250/include/rxrpc/krxsecd.h  
10678 @@ -0,0 +1,22 @@
10679 +/* krxsecd.h: Rx RPC security kernel thread interface
10680 + *
10681 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10682 + * Written by David Howells (dhowells@redhat.com)
10683 + *
10684 + * This program is free software; you can redistribute it and/or
10685 + * modify it under the terms of the GNU General Public License
10686 + * as published by the Free Software Foundation; either version
10687 + * 2 of the License, or (at your option) any later version.
10688 + */
10689 +
10690 +#ifndef _LINUX_RXRPC_KRXSECD_H
10691 +#define _LINUX_RXRPC_KRXSECD_H
10692 +
10693 +#include <rxrpc/types.h>
10694 +
10695 +extern int rxrpc_krxsecd_init(void);
10696 +extern void rxrpc_krxsecd_kill(void);
10697 +extern void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans);
10698 +extern void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg);
10699 +
10700 +#endif /* _LINUX_RXRPC_KRXSECD_H */
10701 diff -urNp linux-5240/include/rxrpc/krxtimod.h linux-5250/include/rxrpc/krxtimod.h
10702 --- linux-5240/include/rxrpc/krxtimod.h 1970-01-01 01:00:00.000000000 +0100
10703 +++ linux-5250/include/rxrpc/krxtimod.h 
10704 @@ -0,0 +1,45 @@
10705 +/* krxtimod.h: RxRPC timeout daemon
10706 + *
10707 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10708 + * Written by David Howells (dhowells@redhat.com)
10709 + *
10710 + * This program is free software; you can redistribute it and/or
10711 + * modify it under the terms of the GNU General Public License
10712 + * as published by the Free Software Foundation; either version
10713 + * 2 of the License, or (at your option) any later version.
10714 + */
10715 +
10716 +#ifndef _LINUX_RXRPC_KRXTIMOD_H
10717 +#define _LINUX_RXRPC_KRXTIMOD_H
10718 +
10719 +#include <rxrpc/types.h>
10720 +
10721 +struct rxrpc_timer_ops {
10722 +       /* called when the front of the timer queue has timed out */
10723 +       void (*timed_out)(struct rxrpc_timer *timer);
10724 +};
10725 +
10726 +/*****************************************************************************/
10727 +/*
10728 + * RXRPC timer/timeout record
10729 + */
10730 +struct rxrpc_timer
10731 +{
10732 +       struct list_head                link;           /* link in timer queue */
10733 +       unsigned long                   timo_jif;       /* timeout time */
10734 +       const struct rxrpc_timer_ops    *ops;           /* timeout expiry function */
10735 +};
10736 +
10737 +static inline void rxrpc_timer_init(rxrpc_timer_t *timer, const struct rxrpc_timer_ops *ops)
10738 +{
10739 +       INIT_LIST_HEAD(&timer->link);
10740 +       timer->ops = ops;
10741 +}
10742 +
10743 +extern int rxrpc_krxtimod_start(void);
10744 +extern void rxrpc_krxtimod_kill(void);
10745 +
10746 +extern void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout);
10747 +extern int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer);
10748 +
10749 +#endif /* _LINUX_RXRPC_KRXTIMOD_H */
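
A small sketch of how the krxtimod timer interface above might be used; the relative-jiffies interpretation of the timeout argument is an assumption:

#include <rxrpc/krxtimod.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static void example_timed_out(struct rxrpc_timer *timer)
{
	/* invoked by krxtimod when this timer reaches the front of the queue and expires */
	printk("example timer %p expired\n", timer);
}

static const struct rxrpc_timer_ops example_timer_ops = {
	timed_out:	example_timed_out,	/* 2.4-style designated initialiser */
};

static struct rxrpc_timer example_timer;

static void example_arm_timer(void)
{
	rxrpc_timer_init(&example_timer, &example_timer_ops);
	rxrpc_krxtimod_add_timer(&example_timer, 5 * HZ);	/* fire in roughly 5 seconds */
}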
10750 diff -urNp linux-5240/include/rxrpc/message.h linux-5250/include/rxrpc/message.h
10751 --- linux-5240/include/rxrpc/message.h  1970-01-01 01:00:00.000000000 +0100
10752 +++ linux-5250/include/rxrpc/message.h  
10753 @@ -0,0 +1,72 @@
10754 +/* message.h: Rx message caching
10755 + *
10756 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10757 + * Written by David Howells (dhowells@redhat.com)
10758 + *
10759 + * This program is free software; you can redistribute it and/or
10760 + * modify it under the terms of the GNU General Public License
10761 + * as published by the Free Software Foundation; either version
10762 + * 2 of the License, or (at your option) any later version.
10763 + */
10764 +
10765 +#ifndef _H_3AD3363A_3A9C_11D6_83D8_0002B3163499
10766 +#define _H_3AD3363A_3A9C_11D6_83D8_0002B3163499
10767 +
10768 +#include <rxrpc/packet.h>
10769 +
10770 +/*****************************************************************************/
10771 +/*
10772 + * Rx message record
10773 + */
10774 +struct rxrpc_message
10775 +{
10776 +       atomic_t                usage;
10777 +       struct list_head        link;           /* list link */
10778 +       struct timeval          stamp;          /* time received or last sent */
10779 +       rxrpc_seq_t             seq;            /* message sequence number */
10780 +
10781 +       int                     state;          /* the state the message is currently in */
10782 +#define RXRPC_MSG_PREPARED     0
10783 +#define RXRPC_MSG_SENT         1
10784 +#define RXRPC_MSG_ACKED                2               /* provisionally ACK'd */
10785 +#define RXRPC_MSG_DONE         3               /* definitively ACK'd (msg->seq<ack.firstPacket) */
10786 +#define RXRPC_MSG_RECEIVED     4
10787 +#define RXRPC_MSG_ERROR                -1
10788 +       char                    rttdone;        /* used for RTT */
10789 +
10790 +       struct rxrpc_transport  *trans;         /* transport received through */
10791 +       struct rxrpc_connection *conn;          /* connection received over */
10792 +       struct sk_buff          *pkt;           /* received packet */
10793 +       off_t                   offset;         /* offset into pkt of next byte of data */
10794 +
10795 +       struct rxrpc_header     hdr;            /* message header */
10796 +
10797 +       int                     dcount;         /* data part count */
10798 +       size_t                  dsize;          /* data size */
10799 +#define RXRPC_MSG_MAX_IOCS 8
10800 +       struct iovec            data[RXRPC_MSG_MAX_IOCS]; /* message data */
10801 +       unsigned long           dfree;          /* bit mask indicating kfree(data[x]) if T */
10802 +};
10803 +
10804 +#define rxrpc_get_message(M) do { atomic_inc(&(M)->usage); } while(0)
10805 +
10806 +extern void __rxrpc_put_message(struct rxrpc_message *msg);
10807 +static inline void rxrpc_put_message(struct rxrpc_message *msg)
10808 +{
10809 +       if (atomic_read(&msg->usage)<=0)
10810 +               BUG();
10811 +       if (atomic_dec_and_test(&msg->usage))
10812 +               __rxrpc_put_message(msg);
10813 +}
10814 +
10815 +extern int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
10816 +                            struct rxrpc_call *call,
10817 +                            u8 type,
10818 +                            int count,
10819 +                            struct iovec diov[],
10820 +                            int alloc_flags,
10821 +                            struct rxrpc_message **_msg);
10822 +
10823 +extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
10824 +
10825 +#endif /* _H_3AD3363A_3A9C_11D6_83D8_0002B3163499 */
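
A sketch of building and sending a packet with the message API above; it mirrors the newmsg/sendmsg/put pattern used by the ACK generator in net/rxrpc/call.c later in this patch, with an illustrative DATA payload:

#include <rxrpc/message.h>
#include <rxrpc/connection.h>
#include <linux/slab.h>
#include <linux/uio.h>

static int example_send_data(struct rxrpc_connection *conn, struct rxrpc_call *call,
			     void *payload, size_t len)
{
	struct rxrpc_message *msg;
	struct iovec diov[1];
	int ret;

	diov[0].iov_base = payload;
	diov[0].iov_len  = len;

	ret = rxrpc_conn_newmsg(conn, call, RXRPC_PACKET_TYPE_DATA, 1, diov, GFP_KERNEL, &msg);
	if (ret < 0)
		return ret;

	ret = rxrpc_conn_sendmsg(conn, msg);
	rxrpc_put_message(msg);		/* drop the reference taken at creation (assumed) */
	return ret;
}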
10826 diff -urNp linux-5240/include/rxrpc/packet.h linux-5250/include/rxrpc/packet.h
10827 --- linux-5240/include/rxrpc/packet.h   1970-01-01 01:00:00.000000000 +0100
10828 +++ linux-5250/include/rxrpc/packet.h   
10829 @@ -0,0 +1,128 @@
10830 +/* packet.h: Rx packet layout and definitions
10831 + *
10832 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10833 + * Written by David Howells (dhowells@redhat.com)
10834 + *
10835 + * This program is free software; you can redistribute it and/or
10836 + * modify it under the terms of the GNU General Public License
10837 + * as published by the Free Software Foundation; either version
10838 + * 2 of the License, or (at your option) any later version.
10839 + */
10840 +
10841 +#ifndef _LINUX_RXRPC_PACKET_H
10842 +#define _LINUX_RXRPC_PACKET_H
10843 +
10844 +#include <rxrpc/types.h>
10845 +
10846 +#define RXRPC_IPUDP_SIZE               28
10847 +extern size_t RXRPC_MAX_PACKET_SIZE;
10848 +#define RXRPC_MAX_PACKET_DATA_SIZE     (RXRPC_MAX_PACKET_SIZE - sizeof(struct rxrpc_header))
10849 +#define RXRPC_LOCAL_PACKET_SIZE                RXRPC_MAX_PACKET_SIZE
10850 +#define RXRPC_REMOTE_PACKET_SIZE       (576 - RXRPC_IPUDP_SIZE)
10851 +
10852 +/*****************************************************************************/
10853 +/*
10854 + * on-the-wire Rx packet header
10855 + * - all multibyte fields should be in network byte order
10856 + */
10857 +struct rxrpc_header
10858 +{
10859 +       u32     epoch;          /* client boot timestamp */
10860 +
10861 +       u32     cid;            /* connection and channel ID */
10862 +#define RXRPC_MAXCALLS         4                       /* max active calls per conn */
10863 +#define RXRPC_CHANNELMASK      (RXRPC_MAXCALLS-1)      /* mask for channel ID */
10864 +#define RXRPC_CIDMASK          (~RXRPC_CHANNELMASK)    /* mask for connection ID */
10865 +#define RXRPC_CIDSHIFT         2                       /* shift for connection ID */
10866 +
10867 +       u32     callNumber;     /* call ID (0 for connection-level packets) */
10868 +#define RXRPC_PROCESS_MAXCALLS (1<<2)  /* maximum number of active calls per conn (power of 2) */
10869 +
10870 +       u32     seq;            /* sequence number of pkt in call stream */
10871 +       u32     serial;         /* serial number of pkt sent to network */
10872 +
10873 +       u8      type;           /* packet type */
10874 +#define RXRPC_PACKET_TYPE_DATA         1       /* data */
10875 +#define RXRPC_PACKET_TYPE_ACK          2       /* ACK */
10876 +#define RXRPC_PACKET_TYPE_BUSY         3       /* call reject */
10877 +#define RXRPC_PACKET_TYPE_ABORT                4       /* call/connection abort */
10878 +#define RXRPC_PACKET_TYPE_ACKALL       5       /* ACK all outstanding packets on call */
10879 +#define RXRPC_PACKET_TYPE_CHALLENGE    6       /* connection security challenge (SRVR->CLNT) */
10880 +#define RXRPC_PACKET_TYPE_RESPONSE     7       /* connection security response (CLNT->SRVR) */
10881 +#define RXRPC_PACKET_TYPE_DEBUG                8       /* debug info request */
10882 +#define RXRPC_N_PACKET_TYPES           9       /* number of packet types (incl type 0) */
10883 +
10884 +       u8      flags;          /* packet flags */
10885 +#define RXRPC_CLIENT_INITIATED 0x01            /* signifies a packet generated by a client */
10886 +#define RXRPC_REQUEST_ACK      0x02            /* request an unconditional ACK of this packet */
10887 +#define RXRPC_LAST_PACKET      0x04            /* the last packet from this side for this call */
10888 +#define RXRPC_MORE_PACKETS     0x08            /* more packets to come */
10889 +#define RXRPC_JUMBO_PACKET     0x20            /* [DATA] this is a jumbo packet */
10890 +#define RXRPC_SLOW_START_OK    0x20            /* [ACK] slow start supported */
10891 +
10892 +       u8      userStatus;     /* app-layer defined status */
10893 +       u8      securityIndex;  /* security protocol ID */
10894 +       u16     _rsvd;          /* reserved (used by kerberos security as cksum) */
10895 +       u16     serviceId;      /* service ID */
10896 +
10897 +} __attribute__((packed));
10898 +
10899 +#define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X)
10900 +
10901 +extern const char *rxrpc_pkts[];
10902 +
10903 +/*****************************************************************************/
10904 +/*
10905 + * jumbo packet secondary header
10906 + * - can be mapped to read header by:
10907 + *   - new_serial = serial + 1
10908 + *   - new_seq = seq + 1
10909 + *   - new_flags = j_flags
10910 + *   - new__rsvd = j__rsvd
10911 + *   - duplicating all other fields
10912 + */
10913 +struct rxrpc_jumbo_header
10914 +{
10915 +       u8      flags;          /* packet flags (as per rxrpc_header) */
10916 +       u8      pad;
10917 +       u16     _rsvd;          /* reserved (used by kerberos security as cksum) */
10918 +};
10919 +
10920 +#define RXRPC_JUMBO_DATALEN    1412    /* non-terminal jumbo packet data length */
10921 +
10922 +/*****************************************************************************/
10923 +/*
10924 + * on-the-wire Rx ACK packet data payload
10925 + * - all multibyte fields should be in network byte order
10926 + */
10927 +struct rxrpc_ackpacket
10928 +{
10929 +       u16     bufferSpace;    /* number of packet buffers available */
10930 +       u16     maxSkew;        /* diff between serno being ACK'd and highest serial no received */
10931 +       u32     firstPacket;    /* sequence no of first ACK'd packet in attached list */
10932 +       u32     previousPacket; /* sequence no of previous packet received */
10933 +       u32     serial;         /* serial no of packet that prompted this ACK */
10934 +
10935 +       u8      reason;         /* reason for ACK */
10936 +#define RXRPC_ACK_REQUESTED            1       /* ACK was requested on packet */
10937 +#define RXRPC_ACK_DUPLICATE            2       /* duplicate packet received */
10938 +#define RXRPC_ACK_OUT_OF_SEQUENCE      3       /* out of sequence packet received */
10939 +#define RXRPC_ACK_EXCEEDS_WINDOW       4       /* packet received beyond end of ACK window */
10940 +#define RXRPC_ACK_NOSPACE              5       /* packet discarded due to lack of buffer space */
10941 +#define RXRPC_ACK_PING                 6       /* keep alive ACK */
10942 +#define RXRPC_ACK_PING_RESPONSE                7       /* response to RXRPC_ACK_PING */
10943 +#define RXRPC_ACK_DELAY                        8       /* nothing happened since received packet */
10944 +#define RXRPC_ACK_IDLE                 9       /* ACK due to fully received ACK window */
10945 +
10946 +       u8      nAcks;          /* number of ACKs */
10947 +#define RXRPC_MAXACKS  255
10948 +
10949 +       u8      acks[0];        /* list of ACK/NAKs */
10950 +#define RXRPC_ACK_TYPE_NACK            0
10951 +#define RXRPC_ACK_TYPE_ACK             1
10952 +
10953 +} __attribute__((packed));
10954 +
10955 +extern const char *rxrpc_acks[];
10956 +
10957 +#endif /* _LINUX_RXRPC_PACKET_H */
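
Two small sketches against the wire format above: splitting the cid field into connection ID and channel, and the jumbo-header mapping described in the comment (the byte-order handling of seq/serial is an assumption):

#include <rxrpc/packet.h>
#include <linux/in.h>

static void example_decode_cid(const struct rxrpc_header *hdr,
			       u32 *conn_id, unsigned *channel)
{
	u32 cid = ntohl(hdr->cid);		/* header fields are in network byte order */

	*conn_id = cid & RXRPC_CIDMASK;		/* connection ID (still shifted by RXRPC_CIDSHIFT) */
	*channel = cid & RXRPC_CHANNELMASK;	/* 0 .. RXRPC_MAXCALLS-1 */
}

static void example_map_jumbo(const struct rxrpc_header *outer,
			      const struct rxrpc_jumbo_header *jumbo,
			      struct rxrpc_header *inner)
{
	*inner = *outer;				 /* duplicate all other fields */
	inner->seq    = htonl(ntohl(outer->seq) + 1);	 /* new_seq = seq + 1 */
	inner->serial = htonl(ntohl(outer->serial) + 1); /* new_serial = serial + 1 */
	inner->flags  = jumbo->flags;			 /* new_flags = j_flags */
	inner->_rsvd  = jumbo->_rsvd;			 /* new__rsvd = j__rsvd */
}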
10958 diff -urNp linux-5240/include/rxrpc/peer.h linux-5250/include/rxrpc/peer.h
10959 --- linux-5240/include/rxrpc/peer.h     1970-01-01 01:00:00.000000000 +0100
10960 +++ linux-5250/include/rxrpc/peer.h     
10961 @@ -0,0 +1,80 @@
10962 +/* peer.h: Rx RPC per-transport peer record
10963 + *
10964 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10965 + * Written by David Howells (dhowells@redhat.com)
10966 + *
10967 + * This program is free software; you can redistribute it and/or
10968 + * modify it under the terms of the GNU General Public License
10969 + * as published by the Free Software Foundation; either version
10970 + * 2 of the License, or (at your option) any later version.
10971 + */
10972 +
10973 +#ifndef _LINUX_RXRPC_PEER_H
10974 +#define _LINUX_RXRPC_PEER_H
10975 +
10976 +#include <linux/wait.h>
10977 +#include <rxrpc/types.h>
10978 +#include <rxrpc/krxtimod.h>
10979 +
10980 +struct rxrpc_peer_ops
10981 +{
10982 +       /* peer record being added */
10983 +       int (*adding)(struct rxrpc_peer *peer);
10984 +
10985 +       /* peer record being discarded from graveyard */
10986 +       void (*discarding)(struct rxrpc_peer *peer);
10987 +
10988 +       /* change of epoch detected on connection */
10989 +       void (*change_of_epoch)(struct rxrpc_connection *conn);
10990 +};
10991 +
10992 +/*****************************************************************************/
10993 +/*
10994 + * Rx RPC per-transport peer record
10995 + * - peers only retain a refcount on the transport when they are active
10996 + * - peers with refcount==0 are inactive and reside in the transport's graveyard
10997 + */
10998 +struct rxrpc_peer
10999 +{
11000 +       atomic_t                usage;
11001 +       struct rxrpc_peer_ops   *ops;           /* operations on this peer */
11002 +       struct rxrpc_transport  *trans;         /* owner transport */
11003 +       struct rxrpc_timer      timeout;        /* timeout for grave destruction */
11004 +       struct list_head        link;           /* link in transport's peer list */
11005 +       struct list_head        proc_link;      /* link in /proc list */
11006 +       rwlock_t                conn_lock;      /* lock for connections */
11007 +       struct list_head        conn_active;    /* active connections to/from this peer */
11008 +       struct list_head        conn_graveyard; /* graveyard for inactive connections */
11009 +       spinlock_t              conn_gylock;    /* lock for conn_graveyard */
11010 +       wait_queue_head_t       conn_gy_waitq;  /* wait queue hit when graveyard is empty */
11011 +       atomic_t                conn_count;     /* number of attached connections */
11012 +       struct in_addr          addr;           /* remote address */
11013 +       size_t                  if_mtu;         /* interface MTU for this peer */
11014 +       spinlock_t              lock;           /* access lock */
11015 +
11016 +       void                    *user;          /* application layer data */
11017 +
11018 +       /* calculated RTT cache */
11019 +#define RXRPC_RTT_CACHE_SIZE 32
11020 +       suseconds_t             rtt;            /* current RTT estimate (in uS) */
11021 +       unsigned short          rtt_point;      /* next entry at which to insert */
11022 +       unsigned short          rtt_usage;      /* amount of cache actually used */
11023 +       suseconds_t             rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
11024 +};
11025 +
11026 +
11027 +extern int rxrpc_peer_lookup(struct rxrpc_transport *trans,
11028 +                            u32 addr,
11029 +                            struct rxrpc_peer **_peer);
11030 +
11031 +static inline void rxrpc_get_peer(struct rxrpc_peer *peer)
11032 +{
11033 +       if (atomic_read(&peer->usage)<0)
11034 +               BUG();
11035 +       atomic_inc(&peer->usage);
11036 +       //printk("rxrpc_get_peer(%p{u=%d})\n",peer,atomic_read(&peer->usage));
11037 +}
11038 +
11039 +extern void rxrpc_put_peer(struct rxrpc_peer *peer);
11040 +
11041 +#endif /* _LINUX_RXRPC_PEER_H */
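
A minimal sketch of the peer API above, assuming a successful rxrpc_peer_lookup() returns a reference that the caller must later drop:

#include <rxrpc/peer.h>

static int example_touch_peer(struct rxrpc_transport *trans, u32 addr_net)
{
	struct rxrpc_peer *peer;
	int ret;

	ret = rxrpc_peer_lookup(trans, addr_net, &peer);	/* addr assumed in network order */
	if (ret < 0)
		return ret;

	/* ... consult peer->if_mtu, peer->rtt, etc. ... */

	rxrpc_put_peer(peer);	/* refcount 0 parks the peer in the transport's graveyard */
	return 0;
}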
11042 diff -urNp linux-5240/include/rxrpc/rxrpc.h linux-5250/include/rxrpc/rxrpc.h
11043 --- linux-5240/include/rxrpc/rxrpc.h    1970-01-01 01:00:00.000000000 +0100
11044 +++ linux-5250/include/rxrpc/rxrpc.h    
11045 @@ -0,0 +1,29 @@
11046 +/* rxrpc.h: Rx RPC interface
11047 + *
11048 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
11049 + * Written by David Howells (dhowells@redhat.com)
11050 + *
11051 + * This program is free software; you can redistribute it and/or
11052 + * modify it under the terms of the GNU General Public License
11053 + * as published by the Free Software Foundation; either version
11054 + * 2 of the License, or (at your option) any later version.
11055 + */
11056 +
11057 +#ifndef _LINUX_RXRPC_RXRPC_H
11058 +#define _LINUX_RXRPC_RXRPC_H
11059 +
11060 +#ifdef __KERNEL__
11061 +
11062 +extern u32 rxrpc_epoch;
11063 +
11064 +extern int rxrpc_ktrace;
11065 +extern int rxrpc_kdebug;
11066 +extern int rxrpc_kproto;
11067 +extern int rxrpc_knet;
11068 +
11069 +extern int rxrpc_sysctl_init(void);
11070 +extern void rxrpc_sysctl_cleanup(void);
11071 +
11072 +#endif /* __KERNEL__ */
11073 +
11074 +#endif /* _LINUX_RXRPC_RXRPC_H */
11075 diff -urNp linux-5240/include/rxrpc/transport.h linux-5250/include/rxrpc/transport.h
11076 --- linux-5240/include/rxrpc/transport.h        1970-01-01 01:00:00.000000000 +0100
11077 +++ linux-5250/include/rxrpc/transport.h        
11078 @@ -0,0 +1,115 @@
11079 +/* transport.h: Rx transport management
11080 + *
11081 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
11082 + * Written by David Howells (dhowells@redhat.com)
11083 + *
11084 + * This program is free software; you can redistribute it and/or
11085 + * modify it under the terms of the GNU General Public License
11086 + * as published by the Free Software Foundation; either version
11087 + * 2 of the License, or (at your option) any later version.
11088 + */
11089 +
11090 +#ifndef _LINUX_RXRPC_TRANSPORT_H
11091 +#define _LINUX_RXRPC_TRANSPORT_H
11092 +
11093 +#include <rxrpc/types.h>
11094 +#include <rxrpc/krxiod.h>
11095 +#include <rxrpc/rxrpc.h>
11096 +#include <linux/skbuff.h>
11097 +#include <linux/rwsem.h>
11098 +
11099 +typedef int (*rxrpc_newcall_fnx_t)(struct rxrpc_call *call);
11100 +
11101 +extern wait_queue_head_t rxrpc_krxiod_wq;
11102 +
11103 +/*****************************************************************************/
11104 +/*
11105 + * Rx operation specification
11106 + * - tables of these must be sorted by op ID so that they can be binary-chop searched
11107 + */
11108 +struct rxrpc_operation
11109 +{
11110 +       unsigned                id;             /* operation ID */
11111 +       size_t                  asize;          /* minimum size of argument block */
11112 +       const char              *name;          /* name of operation */
11113 +       void                    *user;          /* initial user data */
11114 +};
11115 +
11116 +/*****************************************************************************/
11117 +/*
11118 + * Rx transport service record
11119 + */
11120 +struct rxrpc_service
11121 +{
11122 +       struct list_head        link;           /* link in services list on transport */
11123 +       struct module           *owner;         /* owner module */
11124 +       rxrpc_newcall_fnx_t     new_call;       /* new call handler function */
11125 +       const char              *name;          /* name of service */
11126 +       unsigned short          service_id;     /* Rx service ID */
11127 +       rxrpc_call_attn_func_t  attn_func;      /* call requires attention callback */
11128 +       rxrpc_call_error_func_t error_func;     /* call error callback */
11129 +       rxrpc_call_aemap_func_t aemap_func;     /* abort -> errno mapping callback */
11130 +
11131 +       const struct rxrpc_operation    *ops_begin;     /* beginning of operations table */
11132 +       const struct rxrpc_operation    *ops_end;       /* end of operations table */
11133 +};
11134 +
11135 +/*****************************************************************************/
11136 +/*
11137 + * Rx transport endpoint record
11138 + */
11139 +struct rxrpc_transport
11140 +{
11141 +       atomic_t                usage;
11142 +       struct socket           *socket;        /* my UDP socket */
11143 +       struct list_head        services;       /* services listening on this socket */
11144 +       struct list_head        link;           /* link in transport list */
11145 +       struct list_head        proc_link;      /* link in transport proc list */
11146 +       struct list_head        krxiodq_link;   /* krxiod attention queue link */
11147 +       spinlock_t              lock;           /* access lock */
11148 +       struct list_head        peer_active;    /* active peers connected to over this socket */
11149 +       struct list_head        peer_graveyard; /* inactive peer list */
11150 +       spinlock_t              peer_gylock;    /* peer graveyard lock */
11151 +       wait_queue_head_t       peer_gy_waitq;  /* wait queue hit when peer graveyard is empty */
11152 +       rwlock_t                peer_lock;      /* peer list access lock */
11153 +       atomic_t                peer_count;     /* number of peers */
11154 +       struct rxrpc_peer_ops   *peer_ops;      /* default peer operations */
11155 +       unsigned short          port;           /* port upon which listening */
11156 +       volatile char           error_rcvd;     /* T if received ICMP error outstanding */
11157 +};
11158 +
11159 +extern struct list_head rxrpc_transports;
11160 +
11161 +extern int rxrpc_create_transport(unsigned short port,
11162 +                                 struct rxrpc_transport **_trans);
11163 +
11164 +static inline void rxrpc_get_transport(struct rxrpc_transport *trans)
11165 +{
11166 +       if (atomic_read(&trans->usage)<=0)
11167 +               BUG();
11168 +       atomic_inc(&trans->usage);
11169 +       //printk("rxrpc_get_transport(%p{u=%d})\n",trans,atomic_read(&trans->usage));
11170 +}
11171 +
11172 +extern void rxrpc_put_transport(struct rxrpc_transport *trans);
11173 +
11174 +extern int rxrpc_add_service(struct rxrpc_transport *trans,
11175 +                            struct rxrpc_service *srv);
11176 +
11177 +extern void rxrpc_del_service(struct rxrpc_transport *trans,
11178 +                             struct rxrpc_service *srv);
11179 +
11180 +#if 0
11181 +extern int rxrpc_trans_add_connection(struct rxrpc_transport *trans,
11182 +                                     struct rxrpc_connection *conn);
11183 +#endif
11184 +
11185 +extern void rxrpc_trans_receive_packet(struct rxrpc_transport *trans);
11186 +
11187 +extern int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
11188 +                                      struct rxrpc_message *msg,
11189 +                                      int error);
11190 +
11191 +extern void rxrpc_clear_transport(struct rxrpc_transport *trans);
11192 +
11193 +#endif /* _LINUX_RXRPC_TRANSPORT_H */
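
A sketch of registering a service on a transport, following the declarations above; the UDP port, service ID, operation table contents, and the assumption that ops_end points one past the last entry are illustrative:

#include <rxrpc/transport.h>
#include <rxrpc/call.h>
#include <linux/module.h>

static int example_new_call(struct rxrpc_call *call)
{
	/* assumed contract: take a reference and queue the call for servicing elsewhere */
	rxrpc_get_call(call);
	return 0;
}

static const struct rxrpc_operation example_ops[] = {
	{ id: 1, asize: 4,  name: "ExampleOp1" },	/* IDs ascending: table is binary-chop searched */
	{ id: 2, asize: 16, name: "ExampleOp2" },
};

static struct rxrpc_service example_service = {
	name:		"example",
	owner:		THIS_MODULE,
	service_id:	52,			/* illustrative Rx service ID */
	new_call:	example_new_call,
	ops_begin:	&example_ops[0],
	ops_end:	&example_ops[2],
};

static int example_start_listening(struct rxrpc_transport **_trans)
{
	int ret = rxrpc_create_transport(7001, _trans);		/* UDP port is illustrative */

	if (ret == 0)
		ret = rxrpc_add_service(*_trans, &example_service);
	return ret;
}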
11194 diff -urNp linux-5240/include/rxrpc/types.h linux-5250/include/rxrpc/types.h
11195 --- linux-5240/include/rxrpc/types.h    1970-01-01 01:00:00.000000000 +0100
11196 +++ linux-5250/include/rxrpc/types.h    
11197 @@ -0,0 +1,39 @@
11198 +/* types.h: Rx types
11199 + *
11200 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
11201 + * Written by David Howells (dhowells@redhat.com)
11202 + *
11203 + * This program is free software; you can redistribute it and/or
11204 + * modify it under the terms of the GNU General Public License
11205 + * as published by the Free Software Foundation; either version
11206 + * 2 of the License, or (at your option) any later version.
11207 + */
11208 +
11209 +#ifndef _LINUX_RXRPC_TYPES_H
11210 +#define _LINUX_RXRPC_TYPES_H
11211 +
11212 +#include <linux/types.h>
11213 +#include <linux/list.h>
11214 +#include <linux/socket.h>
11215 +#include <linux/in.h>
11216 +#include <linux/spinlock.h>
11217 +#include <asm/atomic.h>
11218 +
11219 +typedef unsigned       rxrpc_seq_t;    /* Rx message sequence number */
11220 +typedef unsigned       rxrpc_serial_t; /* Rx message serial number */
11221 +
11222 +struct rxrpc_call;
11223 +struct rxrpc_connection;
11224 +struct rxrpc_header;
11225 +struct rxrpc_message;
11226 +struct rxrpc_operation;
11227 +struct rxrpc_peer;
11228 +struct rxrpc_service;
11229 +typedef struct rxrpc_timer rxrpc_timer_t;
11230 +struct rxrpc_transport;
11231 +
11232 +typedef void (*rxrpc_call_attn_func_t)(struct rxrpc_call *call);
11233 +typedef void (*rxrpc_call_error_func_t)(struct rxrpc_call *call);
11234 +typedef void (*rxrpc_call_aemap_func_t)(struct rxrpc_call *call);
11235 +
11236 +#endif /* _LINUX_RXRPC_TYPES_H */
11237 diff -urNp linux-5240/net/Makefile linux-5250/net/Makefile
11238 --- linux-5240/net/Makefile     
11239 +++ linux-5250/net/Makefile     
11240 @@ -7,7 +7,7 @@
11241  O_TARGET :=    network.o
11242  
11243  mod-subdirs := ipv4/netfilter ipv6/netfilter bridge/netfilter ipx irda \
11244 -       bluetooth atm netlink sched core
11245 +       bluetooth atm netlink sched core rxrpc
11246  export-objs := netsyms.o
11247  
11248  subdir-y :=    core ethernet
11249 @@ -46,6 +46,7 @@ subdir-$(CONFIG_ATM)          += atm
11250  subdir-$(CONFIG_DECNET)                += decnet
11251  subdir-$(CONFIG_ECONET)                += econet
11252  subdir-$(CONFIG_VLAN_8021Q)           += 8021q
11253 +subdir-$(CONFIG_AFS_FS)                += rxrpc
11254  
11255  
11256  obj-y  := socket.o $(join $(subdir-y), $(patsubst %,/%.o,$(notdir $(subdir-y))))
11257 diff -urNp linux-5240/net/rxrpc/call.c linux-5250/net/rxrpc/call.c
11258 --- linux-5240/net/rxrpc/call.c 1970-01-01 01:00:00.000000000 +0100
11259 +++ linux-5250/net/rxrpc/call.c 
11260 @@ -0,0 +1,2122 @@
11261 +/* call.c: Rx call routines
11262 + *
11263 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
11264 + * Written by David Howells (dhowells@redhat.com)
11265 + *
11266 + * This program is free software; you can redistribute it and/or
11267 + * modify it under the terms of the GNU General Public License
11268 + * as published by the Free Software Foundation; either version
11269 + * 2 of the License, or (at your option) any later version.
11270 + */
11271 +
11272 +#include <linux/sched.h>
11273 +#include <linux/slab.h>
11274 +#include <linux/module.h>
11275 +#include <rxrpc/rxrpc.h>
11276 +#include <rxrpc/transport.h>
11277 +#include <rxrpc/peer.h>
11278 +#include <rxrpc/connection.h>
11279 +#include <rxrpc/call.h>
11280 +#include <rxrpc/message.h>
11281 +#include "internal.h"
11282 +
11283 +__RXACCT_DECL(atomic_t rxrpc_call_count);
11284 +__RXACCT_DECL(atomic_t rxrpc_message_count);
11285 +
11286 +LIST_HEAD(rxrpc_calls);
11287 +DECLARE_RWSEM(rxrpc_calls_sem);
11288 +
11289 +unsigned rxrpc_call_rcv_timeout                = 30;
11290 +unsigned rxrpc_call_acks_timeout       = 30;
11291 +unsigned rxrpc_call_dfr_ack_timeout    = 5;
11292 +unsigned short rxrpc_call_max_resend   = 10;
11293 +
11294 +const char *rxrpc_call_states[] = {
11295 +       "COMPLETE",
11296 +       "ERROR",
11297 +       "SRVR_RCV_OPID",
11298 +       "SRVR_RCV_ARGS",
11299 +       "SRVR_GOT_ARGS",
11300 +       "SRVR_SND_REPLY",
11301 +       "SRVR_RCV_FINAL_ACK",
11302 +       "CLNT_SND_ARGS",
11303 +       "CLNT_RCV_REPLY",
11304 +       "CLNT_GOT_REPLY"
11305 +};
11306 +
11307 +const char *rxrpc_call_error_states[] = {
11308 +       "NO_ERROR",
11309 +       "LOCAL_ABORT",
11310 +       "PEER_ABORT",
11311 +       "LOCAL_ERROR",
11312 +       "REMOTE_ERROR"
11313 +};
11314 +
11315 +const char *rxrpc_pkts[] = {
11316 +       "?00", "data", "ack", "busy", "abort", "ackall", "chall", "resp", "debug",
11317 +       "?09", "?10", "?11", "?12", "?13", "?14", "?15"
11318 +};
11319 +
11320 +const char *rxrpc_acks[] = {
11321 +       "---", "REQ", "DUP", "SEQ", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", "-?-"
11322 +};
11323 +
11324 +static const char _acktype[] = "NA-";
11325 +
11326 +static void rxrpc_call_receive_packet(struct rxrpc_call *call);
11327 +static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc_message *msg);
11328 +static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_message *msg);
11329 +static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, rxrpc_seq_t highest);
11330 +static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest);
11331 +static int __rxrpc_call_read_data(struct rxrpc_call *call);
11332 +
11333 +static int rxrpc_call_record_ACK(struct rxrpc_call *call,
11334 +                                struct rxrpc_message *msg,
11335 +                                rxrpc_seq_t seq,
11336 +                                size_t count);
11337 +#define _state(call) \
11338 +       _debug("[[[ state %s ]]]",rxrpc_call_states[call->app_call_state]);
11339 +
11340 +static void rxrpc_call_default_attn_func(struct rxrpc_call *call)
11341 +{
11342 +       wake_up(&call->waitq);
11343 +}
11344 +
11345 +static void rxrpc_call_default_error_func(struct rxrpc_call *call)
11346 +{
11347 +       wake_up(&call->waitq);
11348 +}
11349 +
11350 +static void rxrpc_call_default_aemap_func(struct rxrpc_call *call)
11351 +{
11352 +       switch (call->app_err_state) {
11353 +       case RXRPC_ESTATE_LOCAL_ABORT:
11354 +               call->app_abort_code = -call->app_errno;
11355 +       case RXRPC_ESTATE_PEER_ABORT:
11356 +               call->app_errno = -ECONNABORTED;
11357 +       default:
11358 +               break;
11359 +       }
11360 +}
11361 +
11362 +static void __rxrpc_call_acks_timeout(unsigned long _call)
11363 +{
11364 +       struct rxrpc_call *call = (struct rxrpc_call *) _call;
11365 +
11366 +       _debug("ACKS TIMEOUT %05lu",jiffies - call->cjif);
11367 +
11368 +       call->flags |= RXRPC_CALL_ACKS_TIMO;
11369 +       rxrpc_krxiod_queue_call(call);
11370 +}
11371 +
11372 +static void __rxrpc_call_rcv_timeout(unsigned long _call)
11373 +{
11374 +       struct rxrpc_call *call = (struct rxrpc_call *) _call;
11375 +
11376 +       _debug("RCV TIMEOUT %05lu",jiffies - call->cjif);
11377 +
11378 +       call->flags |= RXRPC_CALL_RCV_TIMO;
11379 +       rxrpc_krxiod_queue_call(call);
11380 +}
11381 +
11382 +static void __rxrpc_call_ackr_timeout(unsigned long _call)
11383 +{
11384 +       struct rxrpc_call *call = (struct rxrpc_call *) _call;
11385 +
11386 +       _debug("ACKR TIMEOUT %05lu",jiffies - call->cjif);
11387 +
11388 +       call->flags |= RXRPC_CALL_ACKR_TIMO;
11389 +       rxrpc_krxiod_queue_call(call);
11390 +}
11391 +
11392 +/*****************************************************************************/
11393 +/*
11394 + * create a new call record
11395 + */
11396 +static inline int __rxrpc_create_call(struct rxrpc_connection *conn,
11397 +                                     struct rxrpc_call **_call)
11398 +{
11399 +       struct rxrpc_call *call;
11400 +
11401 +       _enter("%p",conn);
11402 +
11403 +       /* allocate and initialise a call record */
11404 +       call = (struct rxrpc_call *) get_zeroed_page(GFP_KERNEL);
11405 +       if (!call) {
11406 +               _leave(" ENOMEM");
11407 +               return -ENOMEM;
11408 +       }
11409 +
11410 +       atomic_set(&call->usage,1);
11411 +
11412 +       init_waitqueue_head(&call->waitq);
11413 +       spin_lock_init(&call->lock);
11414 +       INIT_LIST_HEAD(&call->link);
11415 +       INIT_LIST_HEAD(&call->acks_pendq);
11416 +       INIT_LIST_HEAD(&call->rcv_receiveq);
11417 +       INIT_LIST_HEAD(&call->rcv_krxiodq_lk);
11418 +       INIT_LIST_HEAD(&call->app_readyq);
11419 +       INIT_LIST_HEAD(&call->app_unreadyq);
11420 +       INIT_LIST_HEAD(&call->app_link);
11421 +       INIT_LIST_HEAD(&call->app_attn_link);
11422 +
11423 +       init_timer(&call->acks_timeout);
11424 +       call->acks_timeout.data = (unsigned long) call;
11425 +       call->acks_timeout.function = __rxrpc_call_acks_timeout;
11426 +
11427 +       init_timer(&call->rcv_timeout);
11428 +       call->rcv_timeout.data = (unsigned long) call;
11429 +       call->rcv_timeout.function = __rxrpc_call_rcv_timeout;
11430 +
11431 +       init_timer(&call->ackr_dfr_timo);
11432 +       call->ackr_dfr_timo.data = (unsigned long) call;
11433 +       call->ackr_dfr_timo.function = __rxrpc_call_ackr_timeout;
11434 +
11435 +       call->conn = conn;
11436 +       call->ackr_win_bot = 1;
11437 +       call->ackr_win_top = call->ackr_win_bot + RXRPC_CALL_ACK_WINDOW_SIZE - 1;
11438 +       call->ackr_prev_seq = 0;
11439 +       call->app_mark = RXRPC_APP_MARK_EOF;
11440 +       call->app_attn_func = rxrpc_call_default_attn_func;
11441 +       call->app_error_func = rxrpc_call_default_error_func;
11442 +       call->app_aemap_func = rxrpc_call_default_aemap_func;
11443 +       call->app_scr_alloc = call->app_scratch;
11444 +
11445 +       call->cjif = jiffies;
11446 +
11447 +       _leave(" = 0 (%p)",call);
11448 +
11449 +       *_call = call;
11450 +
11451 +       return 0;
11452 +} /* end __rxrpc_create_call() */
11453 +
11454 +/*****************************************************************************/
11455 +/*
11456 + * create a new call record for outgoing calls
11457 + */
11458 +int rxrpc_create_call(struct rxrpc_connection *conn,
11459 +                     rxrpc_call_attn_func_t attn,
11460 +                     rxrpc_call_error_func_t error,
11461 +                     rxrpc_call_aemap_func_t aemap,
11462 +                     struct rxrpc_call **_call)
11463 +{
11464 +       DECLARE_WAITQUEUE(myself,current);
11465 +
11466 +       struct rxrpc_call *call;
11467 +       int ret, cix, loop;
11468 +
11469 +       _enter("%p",conn);
11470 +
11471 +       /* allocate and initialise a call record */
11472 +       ret = __rxrpc_create_call(conn,&call);
11473 +       if (ret<0) {
11474 +               _leave(" = %d",ret);
11475 +               return ret;
11476 +       }
11477 +
11478 +       call->app_call_state = RXRPC_CSTATE_CLNT_SND_ARGS;
11479 +       if (attn) call->app_attn_func = attn;
11480 +       if (error) call->app_error_func = error;
11481 +       if (aemap) call->app_aemap_func = aemap;
11482 +
11483 +       _state(call);
11484 +
11485 +       spin_lock(&conn->lock);
11486 +       set_current_state(TASK_INTERRUPTIBLE);
11487 +       add_wait_queue(&conn->chanwait,&myself);
11488 +
11489 + try_again:
11490 +       /* try to find an unused channel */
11491 +       for (cix=0; cix<4; cix++)
11492 +               if (!conn->channels[cix])
11493 +                       goto obtained_chan;
11494 +
11495 +       /* no free channels - wait for one to become available */
11496 +       ret = -EINTR;
11497 +       if (signal_pending(current))
11498 +               goto error_unwait;
11499 +
11500 +       spin_unlock(&conn->lock);
11501 +
11502 +       schedule();
11503 +       set_current_state(TASK_INTERRUPTIBLE);
11504 +
11505 +       spin_lock(&conn->lock);
11506 +       goto try_again;
11507 +
11508 +       /* got a channel - now attach to the connection */
11509 + obtained_chan:
11510 +       remove_wait_queue(&conn->chanwait,&myself);
11511 +       set_current_state(TASK_RUNNING);
11512 +
11513 +       /* concoct a unique call number */
11514 + next_callid:
11515 +       call->call_id = htonl(++conn->call_counter);
11516 +       for (loop=0; loop<4; loop++)
11517 +               if (conn->channels[loop] && conn->channels[loop]->call_id==call->call_id)
11518 +                       goto next_callid;
11519 +
11520 +       rxrpc_get_connection(conn);
11521 +       conn->channels[cix] = call; /* assign _after_ done callid check loop */
11522 +       conn->atime = xtime;
11523 +       call->chan_ix = htonl(cix);
11524 +
11525 +       spin_unlock(&conn->lock);
11526 +
11527 +       down_write(&rxrpc_calls_sem);
11528 +       list_add_tail(&call->call_link,&rxrpc_calls);
11529 +       up_write(&rxrpc_calls_sem);
11530 +
11531 +       __RXACCT(atomic_inc(&rxrpc_call_count));
11532 +       *_call = call;
11533 +
11534 +       _leave(" = 0 (call=%p cix=%u)",call,cix);
11535 +       return 0;
11536 +
11537 + error_unwait:
11538 +       remove_wait_queue(&conn->chanwait,&myself);
11539 +       set_current_state(TASK_RUNNING);
11540 +       spin_unlock(&conn->lock);
11541 +
11542 +       free_page((unsigned long)call);
11543 +       _leave(" = %d",ret);
11544 +       return ret;
11545 +
11546 +} /* end rxrpc_create_call() */
11547 +
11548 +/*****************************************************************************/
11549 +/*
11550 + * create a new call record for incoming calls
11551 + */
11552 +int rxrpc_incoming_call(struct rxrpc_connection *conn,
11553 +                       struct rxrpc_message *msg,
11554 +                       struct rxrpc_call **_call)
11555 +{
11556 +       struct rxrpc_call *call;
11557 +       unsigned cix;
11558 +       int ret;
11559 +
11560 +       cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
11561 +
11562 +       _enter("%p,%u,%u",conn,ntohl(msg->hdr.callNumber),cix);
11563 +
11564 +       /* allocate and initialise a call record */
11565 +       ret = __rxrpc_create_call(conn,&call);
11566 +       if (ret<0) {
11567 +               _leave(" = %d",ret);
11568 +               return ret;
11569 +       }
11570 +
11571 +       call->pkt_rcv_count = 1;
11572 +       call->app_call_state = RXRPC_CSTATE_SRVR_RCV_OPID;
11573 +       call->app_mark = sizeof(u32);
11574 +
11575 +       _state(call);
11576 +
11577 +       /* attach to the connection */
11578 +       ret = -EBUSY;
11579 +       call->chan_ix = htonl(cix);
11580 +       call->call_id = msg->hdr.callNumber;
11581 +
11582 +       spin_lock(&conn->lock);
11583 +
11584 +       if (!conn->channels[cix]) {
11585 +               conn->channels[cix] = call;
11586 +               rxrpc_get_connection(conn);
11587 +               ret = 0;
11588 +       }
11589 +
11590 +       spin_unlock(&conn->lock);
11591 +
11592 +       if (ret<0) free_page((unsigned long)call);
11593 +
11594 +       _leave(" = %p",call);
11595 +
11596 +       if (ret==0) {
11597 +               down_write(&rxrpc_calls_sem);
11598 +               list_add_tail(&call->call_link,&rxrpc_calls);
11599 +               up_write(&rxrpc_calls_sem);
11600 +               __RXACCT(atomic_inc(&rxrpc_call_count));
11601 +               *_call = call;
11602 +       }
11603 +
11604 +       return ret;
11605 +} /* end rxrpc_incoming_call() */
11606 +
11607 +/*****************************************************************************/
11608 +/*
11609 + * free a call record
11610 + */
11611 +void rxrpc_put_call(struct rxrpc_call *call)
11612 +{
11613 +       struct rxrpc_connection *conn = call->conn;
11614 +       struct rxrpc_message *msg;
11615 +
11616 +       _enter("%p{u=%d}",call,atomic_read(&call->usage));
11617 +
11618 +       /* sanity check */
11619 +       if (atomic_read(&call->usage)<=0)
11620 +               BUG();
11621 +
11622 +       /* to prevent a race, the decrement and the de-list must be effectively atomic */
11623 +       spin_lock(&conn->lock);
11624 +       if (likely(!atomic_dec_and_test(&call->usage))) {
11625 +               spin_unlock(&conn->lock);
11626 +               _leave("");
11627 +               return;
11628 +       }
11629 +
11630 +       conn->channels[ntohl(call->chan_ix)] = NULL;
11631 +
11632 +       spin_unlock(&conn->lock);
11633 +
11634 +       wake_up(&conn->chanwait);
11635 +
11636 +       rxrpc_put_connection(conn);
11637 +
11638 +       /* clear the timers and dequeue from krxiod */
11639 +       del_timer_sync(&call->acks_timeout);
11640 +       del_timer_sync(&call->rcv_timeout);
11641 +       del_timer_sync(&call->ackr_dfr_timo);
11642 +
11643 +       rxrpc_krxiod_dequeue_call(call);
11644 +
11645 +       /* clean up the contents of the struct */
11646 +       if (call->snd_nextmsg)
11647 +               rxrpc_put_message(call->snd_nextmsg);
11648 +
11649 +       if (call->snd_ping)
11650 +               rxrpc_put_message(call->snd_ping);
11651 +
11652 +       while (!list_empty(&call->acks_pendq)) {
11653 +               msg = list_entry(call->acks_pendq.next,struct rxrpc_message,link);
11654 +               list_del(&msg->link);
11655 +               rxrpc_put_message(msg);
11656 +       }
11657 +
11658 +       while (!list_empty(&call->rcv_receiveq)) {
11659 +               msg = list_entry(call->rcv_receiveq.next,struct rxrpc_message,link);
11660 +               list_del(&msg->link);
11661 +               rxrpc_put_message(msg);
11662 +       }
11663 +
11664 +       while (!list_empty(&call->app_readyq)) {
11665 +               msg = list_entry(call->app_readyq.next,struct rxrpc_message,link);
11666 +               list_del(&msg->link);
11667 +               rxrpc_put_message(msg);
11668 +       }
11669 +
11670 +       while (!list_empty(&call->app_unreadyq)) {
11671 +               msg = list_entry(call->app_unreadyq.next,struct rxrpc_message,link);
11672 +               list_del(&msg->link);
11673 +               rxrpc_put_message(msg);
11674 +       }
11675 +
11676 +       if (call->owner) __MOD_DEC_USE_COUNT(call->owner);
11677 +
11678 +       down_write(&rxrpc_calls_sem);
11679 +       list_del(&call->call_link);
11680 +       up_write(&rxrpc_calls_sem);
11681 +
11682 +       __RXACCT(atomic_dec(&rxrpc_call_count));
11683 +       free_page((unsigned long)call);
11684 +
11685 +       _leave(" [destroyed]");
11686 +} /* end rxrpc_put_call() */
11687 +
11688 +/*****************************************************************************/
11689 +/*
11690 + * actually generate a normal ACK
11691 + */
11692 +static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call, rxrpc_seq_t seq)
11693 +{
11694 +       struct rxrpc_message *msg;
11695 +       struct iovec diov[3];
11696 +       unsigned aux[4];
11697 +       int delta, ret;
11698 +
11699 +       /* ACKs default to DELAY */
11700 +       if (!call->ackr.reason)
11701 +               call->ackr.reason = RXRPC_ACK_DELAY;
11702 +
11703 +       _proto("Rx %05lu Sending ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
11704 +              jiffies - call->cjif,
11705 +              ntohs(call->ackr.maxSkew),
11706 +              ntohl(call->ackr.firstPacket),
11707 +              ntohl(call->ackr.previousPacket),
11708 +              ntohl(call->ackr.serial),
11709 +              rxrpc_acks[call->ackr.reason],
11710 +              call->ackr.nAcks);
11711 +
11712 +       aux[0] = htonl(call->conn->peer->if_mtu);       /* interface MTU */
11713 +       aux[1] = htonl(1444);                           /* max MTU */
11714 +       aux[2] = htonl(16);                             /* rwind */
11715 +       aux[3] = htonl(4);                              /* max packets */
11716 +
11717 +       diov[0].iov_len  = sizeof(struct rxrpc_ackpacket);
11718 +       diov[0].iov_base = &call->ackr;
11719 +       diov[1].iov_len  = (call->ackr_pend_cnt+3);
11720 +       diov[1].iov_base = call->ackr_array;
11721 +       diov[2].iov_len  = sizeof(aux);
11722 +       diov[2].iov_base = &aux;
11723 +
11724 +       /* build and send the message */
11725 +       ret = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_ACK,3,diov,GFP_KERNEL,&msg);
11726 +       if (ret<0)
11727 +               goto out;
11728 +
11729 +       msg->seq = seq;
11730 +       msg->hdr.seq = htonl(seq);
11731 +       msg->hdr.flags |= RXRPC_SLOW_START_OK;
11732 +
11733 +       ret = rxrpc_conn_sendmsg(call->conn,msg);
11734 +       rxrpc_put_message(msg);
11735 +       if (ret<0)
11736 +               goto out;
11737 +       call->pkt_snd_count++;
11738 +
11739 +       /* count how many actual ACKs there were at the front */
11740 +       for (delta=0; delta<call->ackr_pend_cnt; delta++)
11741 +               if (call->ackr_array[delta]!=RXRPC_ACK_TYPE_ACK)
11742 +                       break;
11743 +
11744 +       call->ackr_pend_cnt -= delta; /* all ACK'd to this point */
11745 +
11746 +       /* crank the ACK window around */
11747 +       if (delta==0) {
11748 +               /* un-ACK'd window */
11749 +       }
11750 +       else if (delta < RXRPC_CALL_ACK_WINDOW_SIZE) {
11751 +               /* partially ACK'd window
11752 +                * - shuffle down to avoid losing out-of-sequence packets
11753 +                */
11754 +               call->ackr_win_bot += delta;
11755 +               call->ackr_win_top += delta;
11756 +
11757 +               memmove(&call->ackr_array[0],
11758 +                       &call->ackr_array[delta],
11759 +                       call->ackr_pend_cnt);
11760 +
11761 +               memset(&call->ackr_array[call->ackr_pend_cnt],
11762 +                      RXRPC_ACK_TYPE_NACK,
11763 +                      sizeof(call->ackr_array) - call->ackr_pend_cnt);
11764 +       }
11765 +       else {
11766 +               /* fully ACK'd window
11767 +                * - just clear the whole thing
11768 +                */
11769 +               memset(&call->ackr_array,RXRPC_ACK_TYPE_NACK,sizeof(call->ackr_array));
11770 +       }
11771 +
11772 +       /* clear this ACK */
11773 +       memset(&call->ackr,0,sizeof(call->ackr));
11774 +
11775 + out:
11776 +       if (!call->app_call_state) printk("___ STATE 0 ___\n");
11777 +       return ret;
11778 +} /* end __rxrpc_call_gen_normal_ACK() */
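A minimal userspace-style sketch of the window crank performed above, using an 8-slot window for brevity (the real code uses RXRPC_CALL_ACK_WINDOW_SIZE, and T_ACK/T_NACK stand in for RXRPC_ACK_TYPE_ACK/NACK): the fully ACK'd run at the front is dropped, the remainder is shuffled down with memmove(), and the vacated tail reverts to not-acknowledged.

#include <stdio.h>
#include <string.h>

#define WIN    8                        /* stand-in for RXRPC_CALL_ACK_WINDOW_SIZE */
#define T_ACK  1
#define T_NACK 2

int main(void)
{
        unsigned char win[WIN] = { T_ACK, T_ACK, T_ACK, T_NACK,
                                   T_ACK, T_NACK, T_NACK, T_NACK };
        unsigned bot = 1, top = 1 + WIN;        /* window covers seqs #1-#8 */
        unsigned pend = 6;                      /* slots being tracked as pending */
        unsigned delta;

        /* count the contiguous ACK'd run at the front, as the code above does */
        for (delta = 0; delta < pend; delta++)
                if (win[delta] != T_ACK)
                        break;

        pend -= delta;                          /* everything before here is ACK'd */

        if (delta > 0 && delta < WIN) {
                /* partially ACK'd window: shuffle down, refill the tail */
                bot += delta;
                top += delta;
                memmove(&win[0], &win[delta], pend);
                memset(&win[pend], T_NACK, WIN - pend);
        }

        printf("window now covers #%u-#%u, %u slots still pending\n",
               bot, top - 1, pend);
        return 0;
}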
11779 +
11780 +/*****************************************************************************/
11781 +/*
11782 + * note the reception of a packet in the call's ACK records and generate an appropriate ACK packet
11783 + * if necessary
11784 + * - returns 0 if packet should be processed, 1 if packet should be ignored and -ve on an error
11785 + */
11786 +static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
11787 +                                  struct rxrpc_header *hdr,
11788 +                                  struct rxrpc_ackpacket *ack)
11789 +{
11790 +       struct rxrpc_message *msg;
11791 +       rxrpc_seq_t seq;
11792 +       unsigned offset;
11793 +       int ret = 0, err;
11794 +       u8 special_ACK, do_ACK, force;
11795 +
11796 +       _enter("%p,%p { seq=%d tp=%d fl=%02x }",call,hdr,ntohl(hdr->seq),hdr->type,hdr->flags);
11797 +
11798 +       seq = ntohl(hdr->seq);
11799 +       offset = seq - call->ackr_win_bot;
11800 +       do_ACK = RXRPC_ACK_DELAY;
11801 +       special_ACK = 0;
11802 +       force = (seq==1);
11803 +
11804 +       if (call->ackr_high_seq < seq)
11805 +               call->ackr_high_seq = seq;
11806 +
11807 +       /* deal with generation of obvious special ACKs first */
11808 +       if (ack && ack->reason==RXRPC_ACK_PING) {
11809 +               special_ACK = RXRPC_ACK_PING_RESPONSE;
11810 +               ret = 1;
11811 +               goto gen_ACK;
11812 +       }
11813 +
11814 +       if (seq < call->ackr_win_bot) {
11815 +               special_ACK = RXRPC_ACK_DUPLICATE;
11816 +               ret = 1;
11817 +               goto gen_ACK;
11818 +       }
11819 +
11820 +       if (seq >= call->ackr_win_top) {
11821 +               special_ACK = RXRPC_ACK_EXCEEDS_WINDOW;
11822 +               ret = 1;
11823 +               goto gen_ACK;
11824 +       }
11825 +
11826 +       if (call->ackr_array[offset] != RXRPC_ACK_TYPE_NACK) {
11827 +               special_ACK = RXRPC_ACK_DUPLICATE;
11828 +               ret = 1;
11829 +               goto gen_ACK;
11830 +       }
11831 +
11832 +       /* okay... it's a normal data packet inside the ACK window */
11833 +       call->ackr_array[offset] = RXRPC_ACK_TYPE_ACK;
11834 +
11835 +       if (offset<call->ackr_pend_cnt) {
11836 +       }
11837 +       else if (offset>call->ackr_pend_cnt) {
11838 +               do_ACK = RXRPC_ACK_OUT_OF_SEQUENCE;
11839 +               call->ackr_pend_cnt = offset;
11840 +               goto gen_ACK;
11841 +       }
11842 +
11843 +       if (hdr->flags & RXRPC_REQUEST_ACK) {
11844 +               do_ACK = RXRPC_ACK_REQUESTED;
11845 +       }
11846 +
11847 +       /* generate an ACK on the final packet of a reply just received */
11848 +       if (hdr->flags & RXRPC_LAST_PACKET) {
11849 +               if (call->conn->out_clientflag)
11850 +                       force = 1;
11851 +       }
11852 +       else if (!(hdr->flags & RXRPC_MORE_PACKETS)) {
11853 +               do_ACK = RXRPC_ACK_REQUESTED;
11854 +       }
11855 +
11856 +       /* re-ACK packets previously received out-of-order */
11857 +       for (offset++; offset<RXRPC_CALL_ACK_WINDOW_SIZE; offset++)
11858 +               if (call->ackr_array[offset]!=RXRPC_ACK_TYPE_ACK)
11859 +                       break;
11860 +
11861 +       call->ackr_pend_cnt = offset;
11862 +
11863 +       /* generate an ACK if we fill up the window */
11864 +       if (call->ackr_pend_cnt >= RXRPC_CALL_ACK_WINDOW_SIZE)
11865 +               force = 1;
11866 +
11867 + gen_ACK:
11868 +       _debug("%05lu ACKs pend=%u norm=%s special=%s%s",
11869 +              jiffies - call->cjif,
11870 +              call->ackr_pend_cnt,rxrpc_acks[do_ACK],rxrpc_acks[special_ACK],
11871 +              force ? " immediate" :
11872 +              do_ACK==RXRPC_ACK_REQUESTED ? " merge-req" :
11873 +              hdr->flags & RXRPC_LAST_PACKET ? " finalise" :
11874 +              " defer"
11875 +              );
11876 +
11877 +       /* send any pending normal ACKs if need be */
11878 +       if (call->ackr_pend_cnt>0) {
11879 +               /* fill out the appropriate form */
11880 +               call->ackr.bufferSpace          = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
11881 +               call->ackr.maxSkew              = htons(min(call->ackr_high_seq - seq,65535U));
11882 +               call->ackr.firstPacket          = htonl(call->ackr_win_bot);
11883 +               call->ackr.previousPacket       = call->ackr_prev_seq;
11884 +               call->ackr.serial               = hdr->serial;
11885 +               call->ackr.nAcks                = call->ackr_pend_cnt;
11886 +
11887 +               if (do_ACK==RXRPC_ACK_REQUESTED)
11888 +                       call->ackr.reason = do_ACK;
11889 +
11890 +               /* generate the ACK immediately if necessary */
11891 +               if (special_ACK || force) {
11892 +                       err = __rxrpc_call_gen_normal_ACK(call,do_ACK==RXRPC_ACK_DELAY ? 0 : seq);
11893 +                       if (err<0) {
11894 +                               ret = err;
11895 +                               goto out;
11896 +                       }
11897 +               }
11898 +       }
11899 +
11900 +       if (call->ackr.reason==RXRPC_ACK_REQUESTED)
11901 +               call->ackr_dfr_seq = seq;
11902 +
11903 +       /* start the ACK timer if it's not already running and there are any pending deferred ACKs */
11904 +       if (call->ackr_pend_cnt>0 &&
11905 +           call->ackr.reason!=RXRPC_ACK_REQUESTED &&
11906 +           !timer_pending(&call->ackr_dfr_timo)
11907 +           ) {
11908 +               unsigned long timo;
11909 +
11910 +               timo = rxrpc_call_dfr_ack_timeout + jiffies;
11911 +
11912 +               _debug("START ACKR TIMER for cj=%lu",timo-call->cjif);
11913 +
11914 +               spin_lock(&call->lock);
11915 +               mod_timer(&call->ackr_dfr_timo,timo);
11916 +               spin_unlock(&call->lock);
11917 +       }
11918 +       else if ((call->ackr_pend_cnt==0 || call->ackr.reason==RXRPC_ACK_REQUESTED) &&
11919 +                timer_pending(&call->ackr_dfr_timo)
11920 +                ) {
11921 +               /* stop timer if no pending ACKs */
11922 +               _debug("CLEAR ACKR TIMER");
11923 +               del_timer_sync(&call->ackr_dfr_timo);
11924 +       }
11925 +
11926 +       /* send a special ACK if one is required */
11927 +       if (special_ACK) {
11928 +               struct rxrpc_ackpacket ack;
11929 +               struct iovec diov[2];
11930 +               u8 acks[1] = { RXRPC_ACK_TYPE_ACK };
11931 +
11932 +               /* fill out the appropriate form */
11933 +               ack.bufferSpace         = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
11934 +               ack.maxSkew             = htons(min(call->ackr_high_seq - seq,65535U));
11935 +               ack.firstPacket         = htonl(call->ackr_win_bot);
11936 +               ack.previousPacket      = call->ackr_prev_seq;
11937 +               ack.serial              = hdr->serial;
11938 +               ack.reason              = special_ACK;
11939 +               ack.nAcks               = 0;
11940 +               //ack.nAcks = special_ACK==RXRPC_ACK_OUT_OF_SEQUENCE ? 0 : hdr->seq ? 1 : 0;
11941 +
11942 +               _proto("Rx Sending s-ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
11943 +                      ntohs(ack.maxSkew),ntohl(ack.firstPacket),ntohl(ack.previousPacket),
11944 +                      ntohl(ack.serial),rxrpc_acks[ack.reason],ack.nAcks);
11945 +
11946 +               diov[0].iov_len  = sizeof(struct rxrpc_ackpacket);
11947 +               diov[0].iov_base = &ack;
11948 +               diov[1].iov_len  = sizeof(acks);
11949 +               diov[1].iov_base = acks;
11950 +
11951 +               /* build and send the message */
11952 +               err = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_ACK,
11953 +                                       hdr->seq ? 2 : 1,diov,
11954 +                                       GFP_KERNEL,
11955 +                                       &msg);
11956 +               if (err<0) {
11957 +                       ret = err;
11958 +                       goto out;
11959 +               }
11960 +
11961 +               msg->seq = seq;
11962 +               msg->hdr.seq = htonl(seq);
11963 +               msg->hdr.flags |= RXRPC_SLOW_START_OK;
11964 +
11965 +               err = rxrpc_conn_sendmsg(call->conn,msg);
11966 +               rxrpc_put_message(msg);
11967 +               if (err<0) {
11968 +                       ret = err;
11969 +                       goto out;
11970 +               }
11971 +               call->pkt_snd_count++;
11972 +       }
11973 +
11974 + out:
11975 +       if (hdr->seq)
11976 +               call->ackr_prev_seq = hdr->seq;
11977 +
11978 +       _leave(" = %d",ret);
11979 +       return ret;
11980 +} /* end rxrpc_call_generate_ACK() */
11981 +
11982 +/*****************************************************************************/
11983 +/*
11984 + * handle work to be done on a call
11985 + * - includes packet reception and timeout processing
11986 + */
11987 +void rxrpc_call_do_stuff(struct rxrpc_call *call)
11988 +{
11989 +       _enter("%p{flags=%lx}",call,call->flags);
11990 +
11991 +       /* handle packet reception */
11992 +       if (call->flags & RXRPC_CALL_RCV_PKT) {
11993 +               _debug("- receive packet");
11994 +               call->flags &= ~RXRPC_CALL_RCV_PKT;
11995 +               rxrpc_call_receive_packet(call);
11996 +       }
11997 +
11998 +       /* handle overdue ACKs */
11999 +       if (call->flags & RXRPC_CALL_ACKS_TIMO) {
12000 +               _debug("- overdue ACK timeout");
12001 +               call->flags &= ~RXRPC_CALL_ACKS_TIMO;
12002 +               rxrpc_call_resend(call,call->snd_seq_count);
12003 +       }
12004 +
12005 +       /* handle lack of reception */
12006 +       if (call->flags & RXRPC_CALL_RCV_TIMO) {
12007 +               _debug("- reception timeout");
12008 +               call->flags &= ~RXRPC_CALL_RCV_TIMO;
12009 +               rxrpc_call_abort(call,-EIO);
12010 +       }
12011 +
12012 +       /* handle deferred ACKs */
12013 +       if (call->flags & RXRPC_CALL_ACKR_TIMO ||
12014 +           (call->ackr.nAcks>0 && call->ackr.reason==RXRPC_ACK_REQUESTED)
12015 +           ) {
12016 +               _debug("- deferred ACK timeout: cj=%05lu r=%s n=%u",
12017 +                      jiffies - call->cjif,
12018 +                      rxrpc_acks[call->ackr.reason],
12019 +                      call->ackr.nAcks);
12020 +
12021 +               call->flags &= ~RXRPC_CALL_ACKR_TIMO;
12022 +
12023 +               if (call->ackr.nAcks>0 && call->app_call_state!=RXRPC_CSTATE_ERROR) {
12024 +                       /* generate ACK */
12025 +                       __rxrpc_call_gen_normal_ACK(call,call->ackr_dfr_seq);
12026 +                       call->ackr_dfr_seq = 0;
12027 +               }
12028 +       }
12029 +
12030 +       _leave("");
12031 +
12032 +} /* end rxrpc_call_do_stuff() */
12033 +
12034 +/*****************************************************************************/
12035 +/*
12036 + * send an abort message at call or connection level
12037 + * - must be called with call->lock held, which this function releases before returning
12038 + * - the supplied error code is sent as the packet data
12039 + */
12040 +static int __rxrpc_call_abort(struct rxrpc_call *call, int errno)
12041 +{
12042 +       struct rxrpc_connection *conn = call->conn;
12043 +       struct rxrpc_message *msg;
12044 +       struct iovec diov[1];
12045 +       int ret;
12046 +       u32 _error;
12047 +
12048 +       _enter("%p{%08x},%p{%d},%d",conn,ntohl(conn->conn_id),call,ntohl(call->call_id),errno);
12049 +
12050 +       /* if this call is already aborted, then just wake up any waiters */
12051 +       if (call->app_call_state==RXRPC_CSTATE_ERROR) {
12052 +               spin_unlock(&call->lock);
12053 +               call->app_error_func(call);
12054 +               _leave(" = 0");
12055 +               return 0;
12056 +       }
12057 +
12058 +       rxrpc_get_call(call);
12059 +
12060 +       /* change the state _with_ the lock still held */
12061 +       call->app_call_state = RXRPC_CSTATE_ERROR;
12062 +       call->app_err_state = RXRPC_ESTATE_LOCAL_ABORT;
12063 +       call->app_errno = errno;
12064 +       call->app_mark = RXRPC_APP_MARK_EOF;
12065 +       call->app_read_buf = NULL;
12066 +       call->app_async_read = 0;
12067 +
12068 +       _state(call);
12069 +
12070 +       /* ask the app to translate the error code */
12071 +       call->app_aemap_func(call);
12072 +
12073 +       spin_unlock(&call->lock);
12074 +
12075 +       /* flush any outstanding ACKs */
12076 +       del_timer_sync(&call->acks_timeout);
12077 +       del_timer_sync(&call->rcv_timeout);
12078 +       del_timer_sync(&call->ackr_dfr_timo);
12079 +
12080 +       if (rxrpc_call_is_ack_pending(call))
12081 +               __rxrpc_call_gen_normal_ACK(call,0);
12082 +
12083 +       /* send the abort packet only if we actually traded some other packets */
12084 +       ret = 0;
12085 +       if (call->pkt_snd_count || call->pkt_rcv_count) {
12086 +               /* actually send the abort */
12087 +               _proto("Rx Sending Call ABORT { data=%d }",call->app_abort_code);
12088 +
12089 +               _error = htonl(call->app_abort_code);
12090 +
12091 +               diov[0].iov_len  = sizeof(_error);
12092 +               diov[0].iov_base = &_error;
12093 +
12094 +               ret = rxrpc_conn_newmsg(conn,call,RXRPC_PACKET_TYPE_ABORT,1,diov,GFP_KERNEL,&msg);
12095 +               if (ret==0) {
12096 +                       ret = rxrpc_conn_sendmsg(conn,msg);
12097 +                       rxrpc_put_message(msg);
12098 +               }
12099 +       }
12100 +
12101 +       /* tell the app layer to let go */
12102 +       call->app_error_func(call);
12103 +
12104 +       rxrpc_put_call(call);
12105 +
12106 +       _leave(" = %d",ret);
12107 +
12108 +       return ret;
12109 +} /* end __rxrpc_call_abort() */
12110 +
12111 +/*****************************************************************************/
12112 +/*
12113 + * send an abort message at call or connection level
12114 + * - the supplied error code is sent as the packet data
12115 + */
12116 +int rxrpc_call_abort(struct rxrpc_call *call, int error)
12117 +{
12118 +       spin_lock(&call->lock);
12119 +
12120 +       return __rxrpc_call_abort(call,error);
12121 +
12122 +} /* end rxrpc_call_abort() */
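The locking convention used by the two functions above (the public entry point takes call->lock, and __rxrpc_call_abort() drops it on every return path so the state change and the error callback cannot race) can be illustrated with a small pthread-based sketch; the names here are hypothetical.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int aborted;

/* called with 'lock' held; releases it on every return path */
static int __abort_locked(int err)
{
        if (aborted) {
                pthread_mutex_unlock(&lock);
                return 0;               /* already aborted - nothing more to do */
        }

        aborted = 1;                    /* change state while still locked */
        pthread_mutex_unlock(&lock);

        printf("aborted with error %d\n", err);
        return err;
}

/* public wrapper: takes the lock, then hands responsibility to the helper */
static int do_abort(int err)
{
        pthread_mutex_lock(&lock);
        return __abort_locked(err);
}

int main(void)
{
        do_abort(-5);
        do_abort(-6);                   /* second abort finds the call already dead */
        return 0;
}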
12123 +
12124 +/*****************************************************************************/
12125 +/*
12126 + * process packets waiting for this call
12127 + */
12128 +static void rxrpc_call_receive_packet(struct rxrpc_call *call)
12129 +{
12130 +       struct rxrpc_message *msg;
12131 +       struct list_head *_p;
12132 +       u32 data32;
12133 +
12134 +       _enter("%p",call);
12135 +
12136 +       rxrpc_get_call(call); /* must not go away too soon if aborted by app-layer */
12137 +
12138 +       while (!list_empty(&call->rcv_receiveq)) {
12139 +               /* try to get next packet */
12140 +               _p = NULL;
12141 +               spin_lock(&call->lock);
12142 +               if (!list_empty(&call->rcv_receiveq)) {
12143 +                       _p = call->rcv_receiveq.next;
12144 +                       list_del_init(_p);
12145 +               }
12146 +               spin_unlock(&call->lock);
12147 +
12148 +               if (!_p) break;
12149 +
12150 +               msg = list_entry(_p,struct rxrpc_message,link);
12151 +
12152 +               _proto("Rx %05lu Received %s packet (%%%u,#%u,%c%c%c%c%c)",
12153 +                      jiffies - call->cjif,
12154 +                      rxrpc_pkts[msg->hdr.type],
12155 +                      ntohl(msg->hdr.serial),
12156 +                      msg->seq,
12157 +                      msg->hdr.flags & RXRPC_JUMBO_PACKET      ? 'j' : '-',
12158 +                      msg->hdr.flags & RXRPC_MORE_PACKETS      ? 'm' : '-',
12159 +                      msg->hdr.flags & RXRPC_LAST_PACKET       ? 'l' : '-',
12160 +                      msg->hdr.flags & RXRPC_REQUEST_ACK       ? 'r' : '-',
12161 +                      msg->hdr.flags & RXRPC_CLIENT_INITIATED  ? 'C' : 'S'
12162 +                      );
12163 +
12164 +               switch (msg->hdr.type) {
12165 +                       /* deal with data packets */
12166 +               case RXRPC_PACKET_TYPE_DATA:
12167 +                       /* ACK the packet if necessary */
12168 +                       switch (rxrpc_call_generate_ACK(call,&msg->hdr,NULL)) {
12169 +                       case 0: /* useful packet */
12170 +                               rxrpc_call_receive_data_packet(call,msg);
12171 +                               break;
12172 +                       case 1: /* duplicate or out-of-window packet */
12173 +                               break;
12174 +                       default:
12175 +                               rxrpc_put_message(msg);
12176 +                               goto out;
12177 +                       }
12178 +                       break;
12179 +
12180 +                       /* deal with ACK packets */
12181 +               case RXRPC_PACKET_TYPE_ACK:
12182 +                       rxrpc_call_receive_ack_packet(call,msg);
12183 +                       break;
12184 +
12185 +                       /* deal with abort packets */
12186 +               case RXRPC_PACKET_TYPE_ABORT:
12187 +                       data32 = 0;
12188 +                       if (skb_copy_bits(msg->pkt,msg->offset,&data32,sizeof(data32))<0) {
12189 +                               printk("Rx Received short ABORT packet\n");
12190 +                       }
12191 +                       else {
12192 +                               data32 = ntohl(data32);
12193 +                       }
12194 +
12195 +                       _proto("Rx Received Call ABORT { data=%d }",data32);
12196 +
12197 +                       spin_lock(&call->lock);
12198 +                       call->app_call_state = RXRPC_CSTATE_ERROR;
12199 +                       call->app_err_state = RXRPC_ESTATE_PEER_ABORT;
12200 +                       call->app_abort_code = data32;
12201 +                       call->app_errno = -ECONNABORTED;
12202 +                       call->app_mark = RXRPC_APP_MARK_EOF;
12203 +                       call->app_read_buf = NULL;
12204 +                       call->app_async_read = 0;
12205 +
12206 +                       /* ask the app to translate the error code */
12207 +                       call->app_aemap_func(call);
12208 +                       _state(call);
12209 +                       spin_unlock(&call->lock);
12210 +                       call->app_error_func(call);
12211 +                       break;
12212 +
12213 +               default:
12214 +                       /* deal with other packet types */
12215 +                       _proto("Rx Unsupported packet type %u (#%u)",msg->hdr.type,msg->seq);
12216 +                       break;
12217 +               }
12218 +
12219 +               rxrpc_put_message(msg);
12220 +       }
12221 +
12222 + out:
12223 +       rxrpc_put_call(call);
12224 +       _leave("");
12225 +} /* end rxrpc_call_receive_packet() */
12226 +
12227 +/*****************************************************************************/
12228 +/*
12229 + * process next data packet
12230 + * - as the next data packet arrives:
12231 + *   - it is queued on app_readyq _if_ it is the next one expected (app_ready_seq+1)
12232 + *   - it is queued on app_unreadyq _if_ it is not the next one expected
12233 + *   - if a packet placed on app_readyq completely fills a hole leading up to the first packet
12234 + *     on app_unreadyq, then packets now in sequence are transferred to app_readyq
12235 + * - the application layer can only see packets on app_readyq (app_ready_qty bytes)
12236 + * - the application layer is prodded every time a new packet arrives
12237 + */
12238 +static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc_message *msg)
12239 +{
12240 +       const struct rxrpc_operation *optbl, *op;
12241 +       struct rxrpc_message *pmsg;
12242 +       struct list_head *_p;
12243 +       int ret, lo, hi, rmtimo;
12244 +       u32 opid;
12245 +
12246 +       _enter("%p{%u},%p{%u}",call,ntohl(call->call_id),msg,msg->seq);
12247 +
12248 +       rxrpc_get_message(msg);
12249 +
12250 +       /* add to the unready queue if we'd have to create a hole in the ready queue otherwise */
12251 +       if (msg->seq != call->app_ready_seq+1) {
12252 +               _debug("Call add packet %d to unreadyq",msg->seq);
12253 +
12254 +               /* insert in seq order */
12255 +               list_for_each(_p,&call->app_unreadyq) {
12256 +                       pmsg = list_entry(_p,struct rxrpc_message,link);
12257 +                       if (pmsg->seq>msg->seq)
12258 +                               break;
12259 +               }
12260 +
12261 +               list_add_tail(&msg->link,_p);
12262 +
12263 +               _leave(" [unreadyq]");
12264 +               return;
12265 +       }
12266 +
12267 +       /* next in sequence - simply append into the call's ready queue */
12268 +       _debug("Call add packet %d to readyq (+%d => %d bytes)",
12269 +              msg->seq,msg->dsize,call->app_ready_qty);
12270 +
12271 +       spin_lock(&call->lock);
12272 +       call->app_ready_seq = msg->seq;
12273 +       call->app_ready_qty += msg->dsize;
12274 +       list_add_tail(&msg->link,&call->app_readyq);
12275 +
12276 +       /* move unready packets to the readyq if we got rid of a hole */
12277 +       while (!list_empty(&call->app_unreadyq)) {
12278 +               pmsg = list_entry(call->app_unreadyq.next,struct rxrpc_message,link);
12279 +
12280 +               if (pmsg->seq != call->app_ready_seq+1)
12281 +                       break;
12282 +
12283 +               /* next in sequence - just move list-to-list */
12284 +               _debug("Call transfer packet %d to readyq (+%d => %d bytes)",
12285 +                      pmsg->seq,pmsg->dsize,call->app_ready_qty);
12286 +
12287 +               call->app_ready_seq = pmsg->seq;
12288 +               call->app_ready_qty += pmsg->dsize;
12289 +               list_del_init(&pmsg->link);
12290 +               list_add_tail(&pmsg->link,&call->app_readyq);
12291 +       }
12292 +
12293 +       /* see if we've got the last packet yet */
12294 +       if (!list_empty(&call->app_readyq)) {
12295 +               pmsg = list_entry(call->app_readyq.prev,struct rxrpc_message,link);
12296 +               if (pmsg->hdr.flags & RXRPC_LAST_PACKET) {
12297 +                       call->app_last_rcv = 1;
12298 +                       _debug("Last packet on readyq");
12299 +               }
12300 +       }
12301 +
12302 +       switch (call->app_call_state) {
12303 +               /* do nothing if call already aborted */
12304 +       case RXRPC_CSTATE_ERROR:
12305 +               spin_unlock(&call->lock);
12306 +               _leave(" [error]");
12307 +               return;
12308 +
12309 +               /* extract the operation ID from an incoming call if that's not yet been done */
12310 +       case RXRPC_CSTATE_SRVR_RCV_OPID:
12311 +               spin_unlock(&call->lock);
12312 +
12313 +               /* handle as yet insufficient data for the operation ID */
12314 +               if (call->app_ready_qty<4) {
12315 +                       if (call->app_last_rcv)
12316 +                               rxrpc_call_abort(call,-EINVAL); /* trouble - last packet seen */
12317 +
12318 +                       _leave("");
12319 +                       return;
12320 +               }
12321 +
12322 +               /* pull the operation ID out of the buffer */
12323 +               ret = rxrpc_call_read_data(call,&opid,sizeof(opid),0);
12324 +               if (ret<0) {
12325 +                       printk("Unexpected error from read-data: %d\n",ret);
12326 +                       if (call->app_call_state!=RXRPC_CSTATE_ERROR)
12327 +                               rxrpc_call_abort(call,ret);
12328 +                       _leave("");
12329 +                       return;
12330 +               }
12331 +               call->app_opcode = ntohl(opid);
12332 +
12333 +               /* locate the operation in the available ops table */
12334 +               optbl = call->conn->service->ops_begin;
12335 +               lo = 0;
12336 +               hi = call->conn->service->ops_end - optbl;
12337 +
12338 +               while (lo<hi) {
12339 +                       int mid = (hi+lo) / 2;
12340 +                       op = &optbl[mid];
12341 +                       if (call->app_opcode==op->id)
12342 +                               goto found_op;
12343 +                       if (call->app_opcode>op->id)
12344 +                               lo = mid+1;
12345 +                       else
12346 +                               hi = mid;
12347 +               }
12348 +
12349 +               /* search failed */
12350 +               kproto("Rx Client requested operation %d from %s service",
12351 +                      call->app_opcode,call->conn->service->name);
12352 +               rxrpc_call_abort(call,-EINVAL);
12353 +               _leave(" [inval]");
12354 +               return;
12355 +
12356 +       found_op:
12357 +               _proto("Rx Client requested operation %s from %s service",
12358 +                      op->name,call->conn->service->name);
12359 +
12360 +               /* we're now waiting for the argument block (unless the call was aborted) */
12361 +               spin_lock(&call->lock);
12362 +               if (call->app_call_state==RXRPC_CSTATE_SRVR_RCV_OPID ||
12363 +                   call->app_call_state==RXRPC_CSTATE_SRVR_SND_REPLY) {
12364 +                       if (!call->app_last_rcv)
12365 +                               call->app_call_state = RXRPC_CSTATE_SRVR_RCV_ARGS;
12366 +                       else if (call->app_ready_qty>0)
12367 +                               call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS;
12368 +                       else
12369 +                               call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
12370 +                       call->app_mark = op->asize;
12371 +                       call->app_user = op->user;
12372 +               }
12373 +               spin_unlock(&call->lock);
12374 +
12375 +               _state(call);
12376 +               break;
12377 +
12378 +       case RXRPC_CSTATE_SRVR_RCV_ARGS:
12379 +               /* change state if just received last packet of arg block */
12380 +               if (call->app_last_rcv)
12381 +                       call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS;
12382 +               spin_unlock(&call->lock);
12383 +
12384 +               _state(call);
12385 +               break;
12386 +
12387 +       case RXRPC_CSTATE_CLNT_RCV_REPLY:
12388 +               /* change state if just received last packet of reply block */
12389 +               rmtimo = 0;
12390 +               if (call->app_last_rcv) {
12391 +                       call->app_call_state = RXRPC_CSTATE_CLNT_GOT_REPLY;
12392 +                       rmtimo = 1;
12393 +               }
12394 +               spin_unlock(&call->lock);
12395 +
12396 +               if (rmtimo) {
12397 +                       del_timer_sync(&call->acks_timeout);
12398 +                       del_timer_sync(&call->rcv_timeout);
12399 +                       del_timer_sync(&call->ackr_dfr_timo);
12400 +               }
12401 +
12402 +               _state(call);
12403 +               break;
12404 +
12405 +       default:
12406 +               /* deal with data reception in an unexpected state */
12407 +               printk("Unexpected state [[[ %u ]]]\n",call->app_call_state);
12408 +               __rxrpc_call_abort(call,-EBADMSG);
12409 +               _leave("");
12410 +               return;
12411 +       }
12412 +
12413 +       if (call->app_call_state==RXRPC_CSTATE_CLNT_RCV_REPLY && call->app_last_rcv)
12414 +               BUG();
12415 +
12416 +       /* otherwise just invoke the data function whenever we can satisfy its desire for more
12417 +        * data
12418 +        */
12419 +       _proto("Rx Received Op Data: st=%u qty=%u mk=%u%s",
12420 +              call->app_call_state,call->app_ready_qty,call->app_mark,
12421 +              call->app_last_rcv ? " last-rcvd" : "");
12422 +
12423 +       spin_lock(&call->lock);
12424 +
12425 +       ret = __rxrpc_call_read_data(call);
12426 +       switch (ret) {
12427 +       case 0:
12428 +               spin_unlock(&call->lock);
12429 +               call->app_attn_func(call);
12430 +               break;
12431 +       case -EAGAIN:
12432 +               spin_unlock(&call->lock);
12433 +               break;
12434 +       case -ECONNABORTED:
12435 +               spin_unlock(&call->lock);
12436 +               break;
12437 +       default:
12438 +               __rxrpc_call_abort(call,ret);
12439 +               break;
12440 +       }
12441 +
12442 +       _state(call);
12443 +
12444 +       _leave("");
12445 +
12446 +} /* end rxrpc_call_receive_data_packet() */
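A minimal userspace-style sketch of the queueing policy described in the comment above this function: a packet is published to the reader only if it extends the contiguous sequence, otherwise it is parked, and filling a hole releases any parked packets that are now in order. Names and the window size are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define WINDOW 16                       /* illustrative; handles seqs 1..WINDOW only */

static bool unready[WINDOW + 1];        /* out-of-order packets parked by seq */
static unsigned ready_seq;              /* highest seq made visible to the app */

static void receive(unsigned seq)
{
        if (seq != ready_seq + 1) {
                unready[seq] = true;    /* would leave a hole - park it */
                printf("#%u -> unreadyq\n", seq);
                return;
        }

        ready_seq = seq;                /* next in sequence - publish it */
        printf("#%u -> readyq\n", seq);

        /* a hole has just been filled - promote anything now contiguous */
        while (ready_seq < WINDOW && unready[ready_seq + 1]) {
                unready[++ready_seq] = false;
                printf("#%u promoted to readyq\n", ready_seq);
        }
}

int main(void)
{
        receive(1);     /* in sequence */
        receive(3);     /* hole at #2 - parked */
        receive(4);     /* still parked */
        receive(2);     /* fills the hole; #3 and #4 follow it onto readyq */
        return 0;
}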
12447 +
12448 +/*****************************************************************************/
12449 +/*
12450 + * received an ACK packet
12451 + */
12452 +static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_message *msg)
12453 +{
12454 +       struct rxrpc_ackpacket ack;
12455 +       rxrpc_serial_t serial;
12456 +       rxrpc_seq_t seq;
12457 +       int ret;
12458 +
12459 +       _enter("%p{%u},%p{%u}",call,ntohl(call->call_id),msg,msg->seq);
12460 +
12461 +       /* extract the basic ACK record */
12462 +       if (skb_copy_bits(msg->pkt,msg->offset,&ack,sizeof(ack))<0) {
12463 +               printk("Rx Received short ACK packet\n");
12464 +               return;
12465 +       }
12466 +       msg->offset += sizeof(ack);
12467 +
12468 +       serial = ack.serial;
12469 +       seq = ntohl(ack.firstPacket);
12470 +
12471 +       _proto("Rx Received ACK %%%d { b=%hu m=%hu f=%u p=%u s=%u r=%s n=%u }",
12472 +              ntohl(msg->hdr.serial),
12473 +              ntohs(ack.bufferSpace),
12474 +              ntohs(ack.maxSkew),
12475 +              seq,
12476 +              ntohl(ack.previousPacket),
12477 +              ntohl(serial),
12478 +              rxrpc_acks[ack.reason],
12479 +              ack.nAcks
12480 +              );
12481 +
12482 +       /* check the other side isn't ACK'ing a sequence number I haven't sent yet */
12483 +       if (ack.nAcks>0 && (seq > call->snd_seq_count || seq+ack.nAcks-1 > call->snd_seq_count)) {
12484 +               printk("Received ACK (#%u-#%u) for unsent packet\n",seq,seq+ack.nAcks-1);
12485 +               rxrpc_call_abort(call,-EINVAL);
12486 +               _leave("");
12487 +               return;
12488 +       }
12489 +
12490 +       /* deal with RTT calculation */
12491 +       if (serial) {
12492 +               struct rxrpc_message *rttmsg;
12493 +
12494 +               /* find the prompting packet */
12495 +               spin_lock(&call->lock);
12496 +               if (call->snd_ping && call->snd_ping->hdr.serial==serial) {
12497 +                       /* it was a ping packet */
12498 +                       rttmsg = call->snd_ping;
12499 +                       call->snd_ping = NULL;
12500 +                       spin_unlock(&call->lock);
12501 +
12502 +                       if (rttmsg) {
12503 +                               rttmsg->rttdone = 1;
12504 +                               rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
12505 +                               rxrpc_put_message(rttmsg);
12506 +                       }
12507 +               }
12508 +               else {
12509 +                       struct list_head *_p;
12510 +
12511 +                       /* it ought to be a data packet - look in the pending ACK list */
12512 +                       list_for_each(_p,&call->acks_pendq) {
12513 +                               rttmsg = list_entry(_p,struct rxrpc_message,link);
12514 +                               if (rttmsg->hdr.serial==serial) {
12515 +                                       if (rttmsg->rttdone)
12516 +                                               break; /* never do RTT twice without resending */
12517 +
12518 +                                       rttmsg->rttdone = 1;
12519 +                                       rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
12520 +                                       break;
12521 +                               }
12522 +                       }
12523 +                       spin_unlock(&call->lock);
12524 +               }
12525 +       }
12526 +
12527 +       switch (ack.reason) {
12528 +               /* deal with negative/positive acknowledgement of data packets */
12529 +       case RXRPC_ACK_REQUESTED:
12530 +       case RXRPC_ACK_DELAY:
12531 +       case RXRPC_ACK_IDLE:
12532 +               rxrpc_call_definitively_ACK(call,seq-1);
12533 +               /* fall through - the soft ACK/NAK array still needs recording */
12534 +       case RXRPC_ACK_DUPLICATE:
12535 +       case RXRPC_ACK_OUT_OF_SEQUENCE:
12536 +       case RXRPC_ACK_EXCEEDS_WINDOW:
12537 +               call->snd_resend_cnt = 0;
12538 +               ret = rxrpc_call_record_ACK(call,msg,seq,ack.nAcks);
12539 +               if (ret<0)
12540 +                       rxrpc_call_abort(call,ret);
12541 +               break;
12542 +
12543 +               /* respond to ping packets immediately */
12544 +       case RXRPC_ACK_PING:
12545 +               rxrpc_call_generate_ACK(call,&msg->hdr,&ack);
12546 +               break;
12547 +
12548 +               /* only record RTT on ping response packets */
12549 +       case RXRPC_ACK_PING_RESPONSE:
12550 +               if (call->snd_ping) {
12551 +                       struct rxrpc_message *rttmsg;
12552 +
12553 +                       /* only do RTT stuff if the response matches the retained ping */
12554 +                       rttmsg = NULL;
12555 +                       spin_lock(&call->lock);
12556 +                       if (call->snd_ping && call->snd_ping->hdr.serial==ack.serial) {
12557 +                               rttmsg = call->snd_ping;
12558 +                               call->snd_ping = NULL;
12559 +                       }
12560 +                       spin_unlock(&call->lock);
12561 +
12562 +                       if (rttmsg) {
12563 +                               rttmsg->rttdone = 1;
12564 +                               rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
12565 +                               rxrpc_put_message(rttmsg);
12566 +                       }
12567 +               }
12568 +               break;
12569 +
12570 +       default:
12571 +               printk("Unsupported ACK reason %u\n",ack.reason);
12572 +               break;
12573 +       }
12574 +
12575 +       _leave("");
12576 +} /* end rxrpc_call_receive_ack_packet() */
12577 +
12578 +/*****************************************************************************/
12579 +/*
12580 + * record definitive ACKs for all messages up to and including the one with the 'highest' seq
12581 + */
12582 +static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, rxrpc_seq_t highest)
12583 +{
12584 +       struct rxrpc_message *msg;
12585 +       int now_complete;
12586 +
12587 +       _enter("%p{ads=%u},%u",call,call->acks_dftv_seq,highest);
12588 +
12589 +       while (call->acks_dftv_seq<highest) {
12590 +               call->acks_dftv_seq++;
12591 +
12592 +               _proto("Definitive ACK on packet #%u",call->acks_dftv_seq);
12593 +
12594 +               /* discard those at front of queue until message with highest ACK is found */
12595 +               spin_lock(&call->lock);
12596 +               msg = NULL;
12597 +               if (!list_empty(&call->acks_pendq)) {
12598 +                       msg = list_entry(call->acks_pendq.next,struct rxrpc_message,link);
12599 +                       list_del_init(&msg->link); /* dequeue */
12600 +                       if (msg->state==RXRPC_MSG_SENT)
12601 +                               call->acks_pend_cnt--;
12602 +               }
12603 +               spin_unlock(&call->lock);
12604 +
12605 +               /* insanity check */
12606 +               if (!msg)
12607 +                       panic("%s(): acks_pendq unexpectedly empty\n",__FUNCTION__);
12608 +
12609 +               if (msg->seq!=call->acks_dftv_seq)
12610 +                       panic("%s(): Packet #%u expected at front of acks_pendq (#%u found)\n",
12611 +                             __FUNCTION__,call->acks_dftv_seq,msg->seq);
12612 +
12613 +               /* discard the message */
12614 +               msg->state = RXRPC_MSG_DONE;
12615 +               rxrpc_put_message(msg);
12616 +       }
12617 +
12618 +       /* if all sent packets are definitively ACK'd then prod any sleepers just in case */
12619 +       now_complete = 0;
12620 +       spin_lock(&call->lock);
12621 +       if (call->acks_dftv_seq==call->snd_seq_count) {
12622 +               if (call->app_call_state!=RXRPC_CSTATE_COMPLETE) {
12623 +                       call->app_call_state = RXRPC_CSTATE_COMPLETE;
12624 +                       _state(call);
12625 +                       now_complete = 1;
12626 +               }
12627 +       }
12628 +       spin_unlock(&call->lock);
12629 +
12630 +       if (now_complete) {
12631 +               del_timer_sync(&call->acks_timeout);
12632 +               del_timer_sync(&call->rcv_timeout);
12633 +               del_timer_sync(&call->ackr_dfr_timo);
12634 +               call->app_attn_func(call);
12635 +       }
12636 +
12637 +       _leave("");
12638 +} /* end rxrpc_call_definitively_ACK() */
12639 +
12640 +/*****************************************************************************/
12641 +/*
12642 + * record the specified amount of ACKs/NAKs
12643 + */
12644 +static int rxrpc_call_record_ACK(struct rxrpc_call *call,
12645 +                                struct rxrpc_message *msg,
12646 +                                rxrpc_seq_t seq,
12647 +                                size_t count)
12648 +{
12649 +       struct rxrpc_message *dmsg;
12650 +       struct list_head *_p;
12651 +       rxrpc_seq_t highest;
12652 +       unsigned ix;
12653 +       size_t chunk;
12654 +       char resend, now_complete;
12655 +       u8 acks[16];
12656 +
12657 +       _enter("%p{apc=%u ads=%u},%p,%u,%u",
12658 +              call,call->acks_pend_cnt,call->acks_dftv_seq,msg,seq,count);
12659 +
12660 +       /* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order ACKs) */
12661 +       if (seq<=call->acks_dftv_seq) {
12662 +               unsigned delta = call->acks_dftv_seq - seq;
12663 +
12664 +               if (count<=delta) {
12665 +                       _leave(" = 0 [all definitively ACK'd]");
12666 +                       return 0;
12667 +               }
12668 +
12669 +               seq += delta;
12670 +               count -= delta;
12671 +               msg->offset += delta;
12672 +       }
12673 +
12674 +       highest = seq + count - 1;
12675 +       resend = 0;
12676 +       while (count>0) {
12677 +               /* extract up to 16 ACK slots at a time */
12678 +               chunk = min(count,sizeof(acks));
12679 +               count -= chunk;
12680 +
12681 +               memset(acks,2,sizeof(acks));
12682 +
12683 +               if (skb_copy_bits(msg->pkt,msg->offset,&acks,chunk)<0) {
12684 +                       printk("Rx Received short ACK packet\n");
12685 +                       _leave(" = -EINVAL");
12686 +                       return -EINVAL;
12687 +               }
12688 +               msg->offset += chunk;
12689 +
12690 +               /* check that the ACK set is valid */
12691 +               for (ix=0; ix<chunk; ix++) {
12692 +                       switch (acks[ix]) {
12693 +                       case RXRPC_ACK_TYPE_ACK:
12694 +                               break;
12695 +                       case RXRPC_ACK_TYPE_NACK:
12696 +                               resend = 1;
12697 +                               break;
12698 +                       default:
12699 +                               printk("Rx Received unsupported ACK state %u\n",acks[ix]);
12700 +                               _leave(" = -EINVAL");
12701 +                               return -EINVAL;
12702 +                       }
12703 +               }
12704 +
12705 +               _proto("Rx ACK of packets #%u-#%u [%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)",
12706 +                      seq,seq+chunk-1,
12707 +                      _acktype[acks[0x0]],
12708 +                      _acktype[acks[0x1]],
12709 +                      _acktype[acks[0x2]],
12710 +                      _acktype[acks[0x3]],
12711 +                      _acktype[acks[0x4]],
12712 +                      _acktype[acks[0x5]],
12713 +                      _acktype[acks[0x6]],
12714 +                      _acktype[acks[0x7]],
12715 +                      _acktype[acks[0x8]],
12716 +                      _acktype[acks[0x9]],
12717 +                      _acktype[acks[0xA]],
12718 +                      _acktype[acks[0xB]],
12719 +                      _acktype[acks[0xC]],
12720 +                      _acktype[acks[0xD]],
12721 +                      _acktype[acks[0xE]],
12722 +                      _acktype[acks[0xF]],
12723 +                      call->acks_pend_cnt
12724 +                      );
12725 +
12726 +               /* mark the packets in the ACK queue as being provisionally ACK'd */
12727 +               ix = 0;
12728 +               spin_lock(&call->lock);
12729 +
12730 +               /* find the first packet ACK'd/NAK'd here */
12731 +               list_for_each(_p,&call->acks_pendq) {
12732 +                       dmsg = list_entry(_p,struct rxrpc_message,link);
12733 +                       if (dmsg->seq==seq)
12734 +                               goto found_first;
12735 +                       _debug("- %u: skipping #%u",ix,dmsg->seq);
12736 +               }
12737 +               goto bad_queue;
12738 +
12739 +       found_first:
12740 +               do {
12741 +                       _debug("- %u: processing #%u (%c) apc=%u",
12742 +                              ix,dmsg->seq,_acktype[acks[ix]],call->acks_pend_cnt);
12743 +
12744 +                       if (acks[ix]==RXRPC_ACK_TYPE_ACK) {
12745 +                               if (dmsg->state==RXRPC_MSG_SENT) call->acks_pend_cnt--;
12746 +                               dmsg->state = RXRPC_MSG_ACKED;
12747 +                       }
12748 +                       else {
12749 +                               if (dmsg->state==RXRPC_MSG_ACKED) call->acks_pend_cnt++;
12750 +                               dmsg->state = RXRPC_MSG_SENT;
12751 +                       }
12752 +                       ix++;
12753 +                       seq++;
12754 +
12755 +                       _p = dmsg->link.next;
12756 +                       dmsg = list_entry(_p,struct rxrpc_message,link);
12757 +               } while(ix<chunk && _p!=&call->acks_pendq && dmsg->seq==seq);
12758 +
12759 +               if (ix<chunk)
12760 +                       goto bad_queue;
12761 +
12762 +               spin_unlock(&call->lock);
12763 +       }
12764 +
12765 +       if (resend)
12766 +               rxrpc_call_resend(call,highest);
12767 +
12768 +       /* if all packets are provisionally ACK'd, then wake up anyone who's waiting for that */
12769 +       now_complete = 0;
12770 +       spin_lock(&call->lock);
12771 +       if (call->acks_pend_cnt==0) {
12772 +               if (call->app_call_state==RXRPC_CSTATE_SRVR_RCV_FINAL_ACK) {
12773 +                       call->app_call_state = RXRPC_CSTATE_COMPLETE;
12774 +                       _state(call);
12775 +               }
12776 +               now_complete = 1;
12777 +       }
12778 +       spin_unlock(&call->lock);
12779 +
12780 +       if (now_complete) {
12781 +               _debug("- wake up waiters");
12782 +               del_timer_sync(&call->acks_timeout);
12783 +               del_timer_sync(&call->rcv_timeout);
12784 +               del_timer_sync(&call->ackr_dfr_timo);
12785 +               call->app_attn_func(call);
12786 +       }
12787 +
12788 +       _leave(" = 0 (apc=%u)",call->acks_pend_cnt);
12789 +       return 0;
12790 +
12791 + bad_queue:
12792 +       panic("%s(): acks_pendq in bad state (packet #%u absent)\n",__FUNCTION__,seq);
12793 +
12794 +} /* end rxrpc_call_record_ACK() */
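A minimal sketch of the provisional ACK bookkeeping performed above: each sent packet is either still SENT (awaiting acknowledgement) or soft-ACK'd, the pending counter tracks the former, and a NAK can demote a previously ACK'd packet back to SENT, so the counter moves in both directions. The names are illustrative stand-ins for RXRPC_MSG_SENT/ACKED and call->acks_pend_cnt.

#include <stdio.h>

enum state { MSG_SENT, MSG_ACKED };

int main(void)
{
        enum state pkt[4] = { MSG_SENT, MSG_SENT, MSG_SENT, MSG_SENT };
        unsigned pend = 4;              /* stand-in for call->acks_pend_cnt */
        char report[4] = { 'A', 'A', 'N', 'A' };        /* ACK array for seqs #1-#4 */
        unsigned ix;

        for (ix = 0; ix < 4; ix++) {
                if (report[ix] == 'A') {
                        if (pkt[ix] == MSG_SENT)
                                pend--;
                        pkt[ix] = MSG_ACKED;            /* provisionally ACK'd */
                } else {
                        if (pkt[ix] == MSG_ACKED)
                                pend++;                 /* demoted - back to pending */
                        pkt[ix] = MSG_SENT;             /* will be resent */
                }
        }

        printf("packets still awaiting ACK: %u\n", pend);
        return 0;
}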
12795 +
12796 +/*****************************************************************************/
12797 +/*
12798 + * transfer data from the ready packet queue to the asynchronous read buffer
12799 + * - since this func is the only one going to look at packets queued on app_readyq, we don't need
12800 + *   a lock to modify or access them, only to modify the queue pointers
12801 + * - called with call->lock held
12802 + * - the buffer must be in kernel space
12803 + * - returns:
12804 + *     0 if buffer filled
12805 + *     -EAGAIN if buffer not filled and more data to come
12806 + *     -EBADMSG if last packet received and insufficient data left
12807 + *     -ECONNABORTED if the call is in an error state
12808 + */
12809 +static int __rxrpc_call_read_data(struct rxrpc_call *call)
12810 +{
12811 +       struct rxrpc_message *msg;
12812 +       size_t qty;
12813 +       int ret;
12814 +
12815 +       _enter("%p{as=%d buf=%p qty=%u/%u}",
12816 +              call,call->app_async_read,call->app_read_buf,call->app_ready_qty,call->app_mark);
12817 +
12818 +       /* check the state */
12819 +       switch (call->app_call_state) {
12820 +       case RXRPC_CSTATE_SRVR_RCV_ARGS:
12821 +       case RXRPC_CSTATE_CLNT_RCV_REPLY:
12822 +               if (call->app_last_rcv) {
12823 +                       printk("%s(%p,%p,%d): Inconsistent call state (%s, last pkt)",
12824 +                             __FUNCTION__,call,call->app_read_buf,call->app_mark,
12825 +                             rxrpc_call_states[call->app_call_state]);
12826 +                       BUG();
12827 +               }
12828 +               break;
12829 +
12830 +       case RXRPC_CSTATE_SRVR_RCV_OPID:
12831 +       case RXRPC_CSTATE_SRVR_GOT_ARGS:
12832 +       case RXRPC_CSTATE_CLNT_GOT_REPLY:
12833 +               break;
12834 +
12835 +       case RXRPC_CSTATE_SRVR_SND_REPLY:
12836 +               if (!call->app_last_rcv) {
12837 +                       printk("%s(%p,%p,%d): Inconsistent call state (%s, not last pkt)",
12838 +                             __FUNCTION__,call,call->app_read_buf,call->app_mark,
12839 +                             rxrpc_call_states[call->app_call_state]);
12840 +                       BUG();
12841 +               }
12842 +               _debug("Trying to read data from call in SND_REPLY state");
12843 +               break;
12844 +
12845 +       case RXRPC_CSTATE_ERROR:
12846 +               _leave(" = -ECONNABORTED");
12847 +               return -ECONNABORTED;
12848 +
12849 +       default:
12850 +               printk("reading in unexpected state [[[ %u ]]]\n",call->app_call_state);
12851 +               BUG();
12852 +       }
12853 +
12854 +       /* handle the case of not having an async buffer */
12855 +       if (!call->app_async_read) {
12856 +               if (call->app_mark==RXRPC_APP_MARK_EOF) {
12857 +                       ret = call->app_last_rcv ? 0 : -EAGAIN;
12858 +               }
12859 +               else {
12860 +                       if (call->app_mark >= call->app_ready_qty) {
12861 +                               call->app_mark = RXRPC_APP_MARK_EOF;
12862 +                               ret = 0;
12863 +                       }
12864 +                       else {
12865 +                               ret = call->app_last_rcv ? -EBADMSG : -EAGAIN;
12866 +                       }
12867 +               }
12868 +
12869 +               _leave(" = %d [no buf]",ret);
12870 +               return 0;
12871 +       }
12872 +
12873 +       while (!list_empty(&call->app_readyq) && call->app_mark>0) {
12874 +               msg = list_entry(call->app_readyq.next,struct rxrpc_message,link);
12875 +
12876 +               /* drag as much data as we need out of this packet */
12877 +               qty = min(call->app_mark,msg->dsize);
12878 +
12879 +               _debug("reading %u from skb=%p off=%lu",qty,msg->pkt,msg->offset);
12880 +
12881 +               if (call->app_read_buf)
12882 +                       if (skb_copy_bits(msg->pkt,msg->offset,call->app_read_buf,qty)<0)
12883 +                               panic("%s: Failed to copy data from packet: (%p,%p,%d)",
12884 +                                     __FUNCTION__,call,call->app_read_buf,qty);
12885 +
12886 +               /* if that packet is now empty, discard it */
12887 +               call->app_ready_qty -= qty;
12888 +               msg->dsize -= qty;
12889 +
12890 +               if (msg->dsize==0) {
12891 +                       list_del_init(&msg->link);
12892 +                       rxrpc_put_message(msg);
12893 +               }
12894 +               else {
12895 +                       msg->offset += qty;
12896 +               }
12897 +
12898 +               call->app_mark -= qty;
12899 +               if (call->app_read_buf) call->app_read_buf += qty;
12900 +       }
12901 +
12902 +       if (call->app_mark==0) {
12903 +               call->app_async_read = 0;
12904 +               call->app_mark = RXRPC_APP_MARK_EOF;
12905 +               call->app_read_buf = NULL;
12906 +
12907 +               /* adjust the state if used up all packets */
12908 +               if (list_empty(&call->app_readyq) && call->app_last_rcv) {
12909 +                       switch (call->app_call_state) {
12910 +                       case RXRPC_CSTATE_SRVR_RCV_OPID:
12911 +                               call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
12912 +                               call->app_mark = RXRPC_APP_MARK_EOF;
12913 +                               _state(call);
12914 +                               del_timer_sync(&call->rcv_timeout);
12915 +                               break;
12916 +                       case RXRPC_CSTATE_SRVR_GOT_ARGS:
12917 +                               call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
12918 +                               _state(call);
12919 +                               del_timer_sync(&call->rcv_timeout);
12920 +                               break;
12921 +                       default:
12922 +                               call->app_call_state = RXRPC_CSTATE_COMPLETE;
12923 +                               _state(call);
12924 +                               del_timer_sync(&call->acks_timeout);
12925 +                               del_timer_sync(&call->ackr_dfr_timo);
12926 +                               del_timer_sync(&call->rcv_timeout);
12927 +                               break;
12928 +                       }
12929 +               }
12930 +
12931 +               _leave(" = 0");
12932 +               return 0;
12933 +       }
12934 +
12935 +       if (call->app_last_rcv) {
12936 +               _debug("Insufficient data (%u/%u)",call->app_ready_qty,call->app_mark);
12937 +               call->app_async_read = 0;
12938 +               call->app_mark = RXRPC_APP_MARK_EOF;
12939 +               call->app_read_buf = NULL;
12940 +
12941 +               _leave(" = -EBADMSG");
12942 +               return -EBADMSG;
12943 +       }
12944 +
12945 +       _leave(" = -EAGAIN");
12946 +       return -EAGAIN;
12947 +} /* end __rxrpc_call_read_data() */
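A minimal sketch of the drain loop in the function above: app_mark is the number of bytes the reader still wants, each queued message is consumed from the front, and a message is dropped only once it has been fully consumed; a partly consumed one stays at the head with its remaining size reduced. Names are illustrative.

#include <stdio.h>

struct pkt { unsigned dsize; };         /* remaining payload in a queued message */

int main(void)
{
        struct pkt queue[] = { { 100 }, { 60 }, { 40 } };
        unsigned nr = 3, head = 0;
        unsigned app_mark = 130;        /* bytes the reader still wants */

        while (head < nr && app_mark > 0) {
                unsigned qty = app_mark < queue[head].dsize
                        ? app_mark : queue[head].dsize;

                queue[head].dsize -= qty;
                app_mark -= qty;
                printf("took %u bytes, %u left in packet, %u still wanted\n",
                       qty, queue[head].dsize, app_mark);

                if (queue[head].dsize == 0)
                        head++;         /* packet fully consumed - drop it */
        }

        return 0;
}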
12948 +
12949 +/*****************************************************************************/
12950 +/*
12951 + * attempt to read the specified amount of data from the call's ready queue into the buffer
12952 + * provided
12953 + * - since this func is the only one going to look at packets queued on app_readyq, we don't need
12954 + *   a lock to modify or access them, only to modify the queue pointers
12955 + * - if the buffer pointer is NULL, then data is merely drained, not copied
12956 + * - if flags&RXRPC_CALL_READ_BLOCK, then the function will wait until there is enough data or an
12957 + *   error will be generated
12958 + *   - note that the caller must have added the calling task to the call's wait queue beforehand
12959 + * - if flags&RXRPC_CALL_READ_ALL, then an error will be generated if this function doesn't read
12960 + *   all available data
12961 + */
12962 +int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int flags)
12963 +{
12964 +       int ret;
12965 +
12966 +       _enter("%p{arq=%u},%p,%d,%x",call,call->app_ready_qty,buffer,size,flags);
12967 +
12968 +       spin_lock(&call->lock);
12969 +
12970 +       if (unlikely(!!call->app_read_buf)) {
12971 +               spin_unlock(&call->lock);
12972 +               _leave(" = -EBUSY");
12973 +               return -EBUSY;
12974 +       }
12975 +
12976 +       call->app_mark = size;
12977 +       call->app_read_buf = buffer;
12978 +       call->app_async_read = 1;
12979 +       call->app_read_count++;
12980 +
12981 +       /* read as much data as possible */
12982 +       ret = __rxrpc_call_read_data(call);
12983 +       switch (ret) {
12984 +       case 0:
12985 +               if (flags&RXRPC_CALL_READ_ALL && (!call->app_last_rcv || call->app_ready_qty>0)) {
12986 +                       _leave(" = -EBADMSG");
12987 +                       __rxrpc_call_abort(call,-EBADMSG);
12988 +                       return -EBADMSG;
12989 +               }
12990 +
12991 +               spin_unlock(&call->lock);
12992 +               call->app_attn_func(call);
12993 +               _leave(" = 0");
12994 +               return ret;
12995 +
12996 +       case -ECONNABORTED:
12997 +               spin_unlock(&call->lock);
12998 +               _leave(" = %d [aborted]",ret);
12999 +               return ret;
13000 +
13001 +       default:
13002 +               __rxrpc_call_abort(call,ret);
13003 +               _leave(" = %d",ret);
13004 +               return ret;
13005 +
13006 +       case -EAGAIN:
13007 +               spin_unlock(&call->lock);
13008 +
13009 +               if (!(flags&RXRPC_CALL_READ_BLOCK)) {
13010 +                       _leave(" = -EAGAIN");
13011 +                       return -EAGAIN;
13012 +               }
13013 +
13014 +               /* wait for the data to arrive */
13015 +               _debug("blocking for data arrival");
13016 +
13017 +               for (;;) {
13018 +                       set_current_state(TASK_INTERRUPTIBLE);
13019 +                       if (!call->app_async_read || signal_pending(current))
13020 +                               break;
13021 +                       schedule();
13022 +               }
13023 +               set_current_state(TASK_RUNNING);
13024 +
13025 +               if (signal_pending(current)) {
13026 +                       _leave(" = -EINTR");
13027 +                       return -EINTR;
13028 +               }
13029 +
13030 +               if (call->app_call_state==RXRPC_CSTATE_ERROR) {
13031 +                       _leave(" = -ECONNABORTED");
13032 +                       return -ECONNABORTED;
13033 +               }
13034 +
13035 +               _leave(" = 0");
13036 +               return 0;
13037 +       }
13038 +
13039 +} /* end rxrpc_call_read_data() */
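+
+/*****************************************************************************/
+/*
+ * illustrative sketch only: one way a caller might drive a blocking read with
+ * the routine above
+ * - follows the rule in the banner comment that the caller must already be on
+ *   the call's wait queue before asking for RXRPC_CALL_READ_BLOCK behaviour
+ * - the wait queue member name (call->waitq) and the idea of reading a 32-bit
+ *   opcode are assumptions made for the example, not taken from this file
+ */
+#if 0
+static int example_read_opcode(struct rxrpc_call *call, u32 *_opcode)
+{
+       DECLARE_WAITQUEUE(myself,current);
+       int ret;
+
+       add_wait_queue(&call->waitq,&myself);   /* assumed member name */
+
+       /* ask for exactly four bytes; with RXRPC_CALL_READ_BLOCK the routine
+        * sleeps until the data arrives, the call aborts or a signal is caught
+        */
+       ret = rxrpc_call_read_data(call,_opcode,sizeof(*_opcode),
+                                  RXRPC_CALL_READ_BLOCK);
+
+       remove_wait_queue(&call->waitq,&myself);
+
+       if (ret==0)
+               *_opcode = ntohl(*_opcode);
+       return ret;
+}
+#endif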
13040 +
13041 +/*****************************************************************************/
13042 +/*
13043 + * write data to a call
13044 + * - the data may not be sent immediately if it doesn't fill a buffer
13045 + * - if we can't queue all the data for buffering now, siov[] will have been adjusted to take
13046 + *   account of what has been sent
13047 + */
13048 +int rxrpc_call_write_data(struct rxrpc_call *call,
13049 +                         size_t sioc,
13050 +                         struct iovec siov[],
13051 +                         u8 rxhdr_flags,
13052 +                         int alloc_flags,
13053 +                         int dup_data,
13054 +                         size_t *size_sent)
13055 +{
13056 +       struct rxrpc_message *msg;
13057 +       struct iovec *sptr;
13058 +       size_t space, size, chunk, tmp;
13059 +       char *buf;
13060 +       int ret;
13061 +
13062 +       _enter("%p,%u,%p,%02x,%x,%d,%p",call,sioc,siov,rxhdr_flags,alloc_flags,dup_data,size_sent);
13063 +
13064 +       *size_sent = 0;
13065 +       size = 0;
13066 +       ret = -EINVAL;
13067 +
13068 +       /* can't send more if we've sent last packet from this end */
13069 +       switch (call->app_call_state) {
13070 +       case RXRPC_CSTATE_SRVR_SND_REPLY:
13071 +       case RXRPC_CSTATE_CLNT_SND_ARGS:
13072 +               break;
13073 +       case RXRPC_CSTATE_ERROR:
13074 +               ret = call->app_errno;
13075 +       default:
13076 +               goto out;
13077 +       }
13078 +
13079 +       /* calculate how much data we've been given */
13080 +       sptr = siov;
13081 +       for (; sioc>0; sptr++, sioc--) {
13082 +               if (!sptr->iov_len) continue;
13083 +
13084 +               if (!sptr->iov_base)
13085 +                       goto out;
13086 +
13087 +               size += sptr->iov_len;
13088 +       }
13089 +
13090 +       _debug("- size=%u mtu=%u",size,call->conn->mtu_size);
13091 +
13092 +       do {
13093 +               /* make sure there's a message under construction */
13094 +               if (!call->snd_nextmsg) {
13095 +                       /* no - allocate a message with no data yet attached */
13096 +                       ret = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_DATA,
13097 +                                               0,NULL,alloc_flags,&call->snd_nextmsg);
13098 +                       if (ret<0)
13099 +                               goto out;
13100 +                       _debug("- allocated new message [ds=%u]",call->snd_nextmsg->dsize);
13101 +               }
13102 +
13103 +               msg = call->snd_nextmsg;
13104 +               msg->hdr.flags |= rxhdr_flags;
13105 +
13106 +               /* deal with zero-length terminal packet */
13107 +               if (size==0) {
13108 +                       if (rxhdr_flags & RXRPC_LAST_PACKET) {
13109 +                               ret = rxrpc_call_flush(call);
13110 +                               if (ret<0)
13111 +                                       goto out;
13112 +                       }
13113 +                       break;
13114 +               }
13115 +
13116 +               /* work out how much space the current packet has available */
13117 +               space = call->conn->mtu_size - msg->dsize;
13118 +               chunk = min(space,size);
13119 +
13120 +               _debug("- [before] space=%u chunk=%u",space,chunk);
13121 +
13122 +               while (!siov->iov_len)
13123 +                       siov++;
13124 +
13125 +               /* if we are going to have to duplicate the data then coalesce it too */
13126 +               if (dup_data) {
13127 +                       /* don't allocate more than 1 page at a time */
13128 +                       if (chunk>PAGE_SIZE)
13129 +                               chunk = PAGE_SIZE;
13130 +
13131 +                       /* allocate a data buffer and attach to the message */
13132 +                       buf = kmalloc(chunk,alloc_flags);
13133 +                       if (unlikely(!buf)) {
13134 +                               if (msg->dsize==sizeof(struct rxrpc_header)) {
13135 +                                       /* discard an empty msg and wind back the seq counter */
13136 +                                       rxrpc_put_message(msg);
13137 +                                       call->snd_nextmsg = NULL;
13138 +                                       call->snd_seq_count--;
13139 +                               }
13140 +
13141 +                               ret = -ENOMEM;
13142 +                               goto out;
13143 +                       }
13144 +
13145 +                       tmp = msg->dcount++;
13146 +                       set_bit(tmp,&msg->dfree);
13147 +                       msg->data[tmp].iov_base = buf;
13148 +                       msg->data[tmp].iov_len = chunk;
13149 +                       msg->dsize += chunk;
13150 +                       *size_sent += chunk;
13151 +                       size -= chunk;
13152 +
13153 +                       /* load the buffer with data */
13154 +                       while (chunk>0) {
13155 +                               tmp = min(chunk,siov->iov_len);
13156 +                               memcpy(buf,siov->iov_base,tmp);
13157 +                               buf += tmp;
13158 +                               siov->iov_base += tmp;
13159 +                               siov->iov_len -= tmp;
13160 +                               if (!siov->iov_len)
13161 +                                       siov++;
13162 +                               chunk -= tmp;
13163 +                       }
13164 +               }
13165 +               else {
13166 +                       /* we want to attach the supplied buffers directly */
13167 +                       while (chunk>0 && msg->dcount<RXRPC_MSG_MAX_IOCS) {
13168 +                               tmp = msg->dcount++;
13169 +                               msg->data[tmp].iov_base = siov->iov_base;
13170 +                               msg->data[tmp].iov_len = siov->iov_len;
13171 +                               msg->dsize += siov->iov_len;
13172 +                               *size_sent += siov->iov_len;
13173 +                               size -= siov->iov_len;
13174 +                               chunk -= siov->iov_len;
13175 +                               siov++;
13176 +                       }
13177 +               }
13178 +
13179 +               _debug("- [loaded] chunk=%u size=%u",chunk,size);
13180 +
13181 +               /* dispatch the message when full, final or requesting ACK */
13182 +               if (msg->dsize>=call->conn->mtu_size || rxhdr_flags) {
13183 +                       ret = rxrpc_call_flush(call);
13184 +                       if (ret<0)
13185 +                               goto out;
13186 +               }
13187 +
13188 +       } while(size>0);
13189 +
13190 +       ret = 0;
13191 + out:
13192 +       _leave(" = %d (%d queued, %d rem)",ret,*size_sent,size);
13193 +       return ret;
13194 +
13195 +} /* end rxrpc_call_write_data() */
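+
+/*****************************************************************************/
+/*
+ * illustrative sketch only: queueing a complete reply through the routine
+ * above from a single buffer
+ * - dup_data is set, so the payload is copied and need not outlive the call
+ * - RXRPC_LAST_PACKET marks the final DATA packet and also makes the routine
+ *   flush the partially built message (see rxrpc_call_flush() below)
+ * - the function name and the notion of a "reply buffer" are placeholders
+ */
+#if 0
+static int example_send_reply(struct rxrpc_call *call, void *reply, size_t len)
+{
+       struct iovec siov[1];
+       size_t sent;
+
+       siov[0].iov_base = reply;
+       siov[0].iov_len  = len;
+
+       return rxrpc_call_write_data(call,1,siov,RXRPC_LAST_PACKET,
+                                    GFP_KERNEL,1,&sent);
+}
+#endif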
13196 +
13197 +/*****************************************************************************/
13198 +/*
13199 + * flush outstanding packets to the network
13200 + */
13201 +int rxrpc_call_flush(struct rxrpc_call *call)
13202 +{
13203 +       struct rxrpc_message *msg;
13204 +       int ret = 0;
13205 +
13206 +       _enter("%p",call);
13207 +
13208 +       rxrpc_get_call(call);
13209 +
13210 +       /* if there's a packet under construction, then dispatch it now */
13211 +       if (call->snd_nextmsg) {
13212 +               msg = call->snd_nextmsg;
13213 +               call->snd_nextmsg = NULL;
13214 +
13215 +               if (msg->hdr.flags & RXRPC_LAST_PACKET) {
13216 +                       msg->hdr.flags &= ~RXRPC_MORE_PACKETS;
13217 +                       msg->hdr.flags |= RXRPC_REQUEST_ACK;
13218 +               }
13219 +               else {
13220 +                       msg->hdr.flags |= RXRPC_MORE_PACKETS;
13221 +               }
13222 +
13223 +               _proto("Sending DATA message { ds=%u dc=%u df=%02lu }",
13224 +                      msg->dsize,msg->dcount,msg->dfree);
13225 +
13226 +               /* queue and adjust call state */
13227 +               spin_lock(&call->lock);
13228 +               list_add_tail(&msg->link,&call->acks_pendq);
13229 +
13230 +               /* decide what to do depending on current state and if this is the last packet */
13231 +               ret = -EINVAL;
13232 +               switch (call->app_call_state) {
13233 +               case RXRPC_CSTATE_SRVR_SND_REPLY:
13234 +                       if (msg->hdr.flags & RXRPC_LAST_PACKET) {
13235 +                               call->app_call_state = RXRPC_CSTATE_SRVR_RCV_FINAL_ACK;
13236 +                               _state(call);
13237 +                       }
13238 +                       break;
13239 +
13240 +               case RXRPC_CSTATE_CLNT_SND_ARGS:
13241 +                       if (msg->hdr.flags & RXRPC_LAST_PACKET) {
13242 +                               call->app_call_state = RXRPC_CSTATE_CLNT_RCV_REPLY;
13243 +                               _state(call);
13244 +                       }
13245 +                       break;
13246 +
13247 +               case RXRPC_CSTATE_ERROR:
13248 +                       ret = call->app_errno;
13249 +               default:
13250 +                       spin_unlock(&call->lock);
13251 +                       goto out;
13252 +               }
13253 +
13254 +               call->acks_pend_cnt++;
13255 +
13256 +               mod_timer(&call->acks_timeout,jiffies + rxrpc_call_acks_timeout);
13257 +
13258 +               spin_unlock(&call->lock);
13259 +
13260 +               ret = rxrpc_conn_sendmsg(call->conn,msg);
13261 +               if (ret==0)
13262 +                       call->pkt_snd_count++;
13263 +       }
13264 +
13265 + out:
13266 +       rxrpc_put_call(call);
13267 +
13268 +       _leave(" = %d",ret);
13269 +       return ret;
13270 +
13271 +} /* end rxrpc_call_flush() */
13272 +
13273 +/*****************************************************************************/
13274 +/*
13275 + * resend NAK'd or unacknowledged packets up to the highest one specified
13276 + */
13277 +static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
13278 +{
13279 +       struct rxrpc_message *msg;
13280 +       struct list_head *_p;
13281 +       rxrpc_seq_t seq = 0;
13282 +
13283 +       _enter("%p,%u",call,highest);
13284 +
13285 +       _proto("Rx Resend required");
13286 +
13287 +       /* handle too many resends */
13288 +       if (call->snd_resend_cnt>=rxrpc_call_max_resend) {
13289 +               _debug("Aborting due to too many resends (rcv=%d)",call->pkt_rcv_count);
13290 +               rxrpc_call_abort(call,call->pkt_rcv_count>0?-EIO:-ETIMEDOUT);
13291 +               _leave("");
13292 +               return;
13293 +       }
13294 +
13295 +       spin_lock(&call->lock);
13296 +       call->snd_resend_cnt++;
13297 +       for (;;) {
13298 +               /* work out which packet we might need to ACK next */
13299 +               if (seq<=call->acks_dftv_seq)
13300 +                       seq = call->acks_dftv_seq;
13301 +               seq++;
13302 +
13303 +               if (seq>highest)
13304 +                       break;
13305 +
13306 +               /* look for the packet in the pending-ACK queue */
13307 +               list_for_each(_p,&call->acks_pendq) {
13308 +                       msg = list_entry(_p,struct rxrpc_message,link);
13309 +                       if (msg->seq==seq)
13310 +                               goto found_msg;
13311 +               }
13312 +
13313 +               panic("%s(%p,%d): Inconsistent pending-ACK queue (ds=%u sc=%u sq=%u)\n",
13314 +                     __FUNCTION__,call,highest,call->acks_dftv_seq,call->snd_seq_count,seq);
13315 +
13316 +       found_msg:
13317 +               if (msg->state!=RXRPC_MSG_SENT)
13318 +                       continue; /* only un-ACK'd packets */
13319 +
13320 +               rxrpc_get_message(msg);
13321 +               spin_unlock(&call->lock);
13322 +
13323 +               /* send each message again (and ignore any errors we might incur) */
13324 +               _proto("Resending DATA message { ds=%u dc=%u df=%02lu }",
13325 +                      msg->dsize,msg->dcount,msg->dfree);
13326 +
13327 +               if (rxrpc_conn_sendmsg(call->conn,msg)==0)
13328 +                       call->pkt_snd_count++;
13329 +
13330 +               rxrpc_put_message(msg);
13331 +
13332 +               spin_lock(&call->lock);
13333 +       }
13334 +
13335 +       /* reset the timeout */
13336 +       mod_timer(&call->acks_timeout,jiffies + rxrpc_call_acks_timeout);
13337 +
13338 +       spin_unlock(&call->lock);
13339 +
13340 +       _leave("");
13341 +} /* end rxrpc_call_resend() */
13342 +
13343 +/*****************************************************************************/
13344 +/*
13345 + * handle an ICMP error being applied to a call
13346 + */
13347 +void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno)
13348 +{
13349 +       _enter("%p{%u},%d",call,ntohl(call->call_id),errno);
13350 +
13351 +       /* if this call is already aborted, then just wake up any waiters */
13352 +       if (call->app_call_state==RXRPC_CSTATE_ERROR) {
13353 +               call->app_error_func(call);
13354 +       }
13355 +       else {
13356 +               /* tell the app layer what happened */
13357 +               spin_lock(&call->lock);
13358 +               call->app_call_state = RXRPC_CSTATE_ERROR;
13359 +               _state(call);
13360 +               if (local)
13361 +                       call->app_err_state = RXRPC_ESTATE_LOCAL_ERROR;
13362 +               else
13363 +                       call->app_err_state = RXRPC_ESTATE_REMOTE_ERROR;
13364 +               call->app_errno = errno;
13365 +               call->app_mark = RXRPC_APP_MARK_EOF;
13366 +               call->app_read_buf = NULL;
13367 +               call->app_async_read = 0;
13368 +
13369 +               /* map the error */
13370 +               call->app_aemap_func(call);
13371 +
13372 +               del_timer_sync(&call->acks_timeout);
13373 +               del_timer_sync(&call->rcv_timeout);
13374 +               del_timer_sync(&call->ackr_dfr_timo);
13375 +
13376 +               spin_unlock(&call->lock);
13377 +
13378 +               call->app_error_func(call);
13379 +       }
13380 +
13381 +       _leave("");
13382 +} /* end rxrpc_call_handle_error() */
13383 diff -urNp linux-5240/net/rxrpc/connection.c linux-5250/net/rxrpc/connection.c
13384 --- linux-5240/net/rxrpc/connection.c   1970-01-01 01:00:00.000000000 +0100
13385 +++ linux-5250/net/rxrpc/connection.c   
13386 @@ -0,0 +1,686 @@
13387 +/* connection.c: Rx connection routines
13388 + *
13389 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
13390 + * Written by David Howells (dhowells@redhat.com)
13391 + *
13392 + * This program is free software; you can redistribute it and/or
13393 + * modify it under the terms of the GNU General Public License
13394 + * as published by the Free Software Foundation; either version
13395 + * 2 of the License, or (at your option) any later version.
13396 + */
13397 +
13398 +#include <linux/sched.h>
13399 +#include <linux/slab.h>
13400 +#include <linux/module.h>
13401 +#include <rxrpc/rxrpc.h>
13402 +#include <rxrpc/transport.h>
13403 +#include <rxrpc/peer.h>
13404 +#include <rxrpc/connection.h>
13405 +#include <rxrpc/call.h>
13406 +#include <rxrpc/message.h>
13407 +#include <linux/udp.h>
13408 +#include <linux/ip.h>
13409 +#include <net/sock.h>
13410 +#include <asm/uaccess.h>
13411 +#include "internal.h"
13412 +
13413 +__RXACCT_DECL(atomic_t rxrpc_connection_count);
13414 +
13415 +LIST_HEAD(rxrpc_conns);
13416 +DECLARE_RWSEM(rxrpc_conns_sem);
13417 +
13418 +static void __rxrpc_conn_timeout(rxrpc_timer_t *timer)
13419 +{
13420 +       struct rxrpc_connection *conn = list_entry(timer,struct rxrpc_connection,timeout);
13421 +
13422 +       _debug("Rx CONN TIMEOUT [%p{u=%d}]",conn,atomic_read(&conn->usage));
13423 +
13424 +       rxrpc_conn_do_timeout(conn);
13425 +}
13426 +
13427 +static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = {
13428 +       timed_out:      __rxrpc_conn_timeout,
13429 +};
13430 +
13431 +/*****************************************************************************/
13432 +/*
13433 + * create a new connection record
13434 + */
13435 +static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
13436 +                                           struct rxrpc_connection **_conn)
13437 +{
13438 +       struct rxrpc_connection *conn;
13439 +
13440 +       _enter("%p",peer);
13441 +
13442 +       /* allocate and initialise a connection record */
13443 +       conn = kmalloc(sizeof(struct rxrpc_connection),GFP_KERNEL);
13444 +       if (!conn) {
13445 +               _leave(" = -ENOMEM");
13446 +               return -ENOMEM;
13447 +       }
13448 +
13449 +       memset(conn,0,sizeof(struct rxrpc_connection));
13450 +       atomic_set(&conn->usage,1);
13451 +
13452 +       INIT_LIST_HEAD(&conn->link);
13453 +       init_waitqueue_head(&conn->chanwait);
13454 +       spin_lock_init(&conn->lock);
13455 +       rxrpc_timer_init(&conn->timeout,&rxrpc_conn_timer_ops);
13456 +
13457 +       conn->atime = xtime;
13458 +       conn->mtu_size = 1024;
13459 +       conn->peer = peer;
13460 +       conn->trans = peer->trans;
13461 +
13462 +       __RXACCT(atomic_inc(&rxrpc_connection_count));
13463 +       *_conn = conn;
13464 +       _leave(" = 0 (%p)",conn);
13465 +
13466 +       return 0;
13467 +} /* end __rxrpc_create_connection() */
13468 +
13469 +/*****************************************************************************/
13470 +/*
13471 + * create a new connection record for outgoing connections
13472 + */
13473 +int rxrpc_create_connection(struct rxrpc_transport *trans,
13474 +                           u16 port,
13475 +                           u32 addr,
13476 +                           unsigned short service_id,
13477 +                           void *security,
13478 +                           struct rxrpc_connection **_conn)
13479 +{
13480 +       struct rxrpc_connection *conn;
13481 +       struct rxrpc_peer *peer;
13482 +       int ret;
13483 +
13484 +       _enter("%p{%hu},%u,%hu",trans,trans->port,ntohs(port),service_id);
13485 +
13486 +       /* get a peer record */
13487 +       ret = rxrpc_peer_lookup(trans,addr,&peer);
13488 +       if (ret<0) {
13489 +               _leave(" = %d",ret);
13490 +               return ret;
13491 +       }
13492 +
13493 +       /* allocate and initialise a connection record */
13494 +       ret = __rxrpc_create_connection(peer,&conn);
13495 +       if (ret<0) {
13496 +               rxrpc_put_peer(peer);
13497 +               _leave(" = %d",ret);
13498 +               return ret;
13499 +       }
13500 +
13501 +       /* fill in the specific bits */
13502 +       conn->addr.sin_family   = AF_INET;
13503 +       conn->addr.sin_port     = port;
13504 +       conn->addr.sin_addr.s_addr = addr;
13505 +
13506 +       conn->in_epoch          = rxrpc_epoch;
13507 +       conn->out_epoch         = rxrpc_epoch;
13508 +       conn->in_clientflag     = 0;
13509 +       conn->out_clientflag    = RXRPC_CLIENT_INITIATED;
13510 +       conn->conn_id           = htonl((unsigned) conn & RXRPC_CIDMASK);
13511 +       conn->service_id        = htons(service_id);
13512 +
13513 +       /* attach to peer */
13514 +       conn->peer = peer;
13515 +
13516 +       write_lock(&peer->conn_lock);
13517 +       list_add_tail(&conn->link,&peer->conn_active);
13518 +       atomic_inc(&peer->conn_count);
13519 +       write_unlock(&peer->conn_lock);
13520 +
13521 +       down_write(&rxrpc_conns_sem);
13522 +       list_add_tail(&conn->proc_link,&rxrpc_conns);
13523 +       up_write(&rxrpc_conns_sem);
13524 +
13525 +       *_conn = conn;
13526 +       _leave(" = 0 (%p)",conn);
13527 +
13528 +       return 0;
13529 +} /* end rxrpc_create_connection() */
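+
+/*****************************************************************************/
+/*
+ * illustrative sketch only: opening and later releasing an outgoing
+ * connection with the routine above
+ * - the port and address are handed over in network byte order, since they
+ *   are stored straight into the sockaddr
+ * - the port, address and service ID values below are placeholders, not
+ *   values taken from this file
+ */
+#if 0
+static int example_connect(struct rxrpc_transport *trans)
+{
+       struct rxrpc_connection *conn;
+       int ret;
+
+       ret = rxrpc_create_connection(trans,
+                                     htons(7000),       /* placeholder port */
+                                     htonl(0x7f000001), /* placeholder address */
+                                     1,                 /* placeholder service ID */
+                                     NULL,              /* no security */
+                                     &conn);
+       if (ret<0)
+               return ret;
+
+       /* ... make calls over the connection ... */
+
+       /* drop the ref; the record lingers on the peer's connection graveyard
+        * for a while in case it gets looked up again */
+       rxrpc_put_connection(conn);
+       return 0;
+}
+#endif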
13530 +
13531 +/*****************************************************************************/
13532 +/*
13533 + * lookup the connection for an incoming packet
13534 + * - create a new connection record for unrecorded incoming connections
13535 + */
13536 +int rxrpc_connection_lookup(struct rxrpc_peer *peer,
13537 +                           struct rxrpc_message *msg,
13538 +                           struct rxrpc_connection **_conn)
13539 +{
13540 +       struct rxrpc_connection *conn, *candidate = NULL;
13541 +       struct list_head *_p;
13542 +       int ret, fresh = 0;
13543 +       u32 x_epoch, x_connid;
13544 +       u16 x_port, x_secix, x_servid;
13545 +       u8 x_clflag;
13546 +
13547 +       _enter("%p{{%hu}},%u,%hu",
13548 +              peer,peer->trans->port,ntohs(msg->pkt->h.uh->source),ntohs(msg->hdr.serviceId));
13549 +
13550 +       x_port          = msg->pkt->h.uh->source;
13551 +       x_epoch         = msg->hdr.epoch;
13552 +       x_clflag        = msg->hdr.flags & RXRPC_CLIENT_INITIATED;
13553 +       x_connid        = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK);
13554 +       x_servid        = msg->hdr.serviceId;
13555 +       x_secix         = msg->hdr.securityIndex;
13556 +
13557 +       /* [common case] search the transport's active list first */
13558 +       read_lock(&peer->conn_lock);
13559 +       list_for_each(_p,&peer->conn_active) {
13560 +               conn = list_entry(_p,struct rxrpc_connection,link);
13561 +               if (conn->addr.sin_port         == x_port       &&
13562 +                   conn->in_epoch              == x_epoch      &&
13563 +                   conn->conn_id               == x_connid     &&
13564 +                   conn->security_ix           == x_secix      &&
13565 +                   conn->service_id            == x_servid     && 
13566 +                   conn->in_clientflag         == x_clflag)
13567 +                       goto found_active;
13568 +       }
13569 +       read_unlock(&peer->conn_lock);
13570 +
13571 +       /* [uncommon case] not active 
13572 +        * - create a candidate for a new record if an inbound connection
13573 +        * - only examine the graveyard for an outbound connection
13574 +        */
13575 +       if (x_clflag) {
13576 +               ret = __rxrpc_create_connection(peer,&candidate);
13577 +               if (ret<0) {
13578 +                       _leave(" = %d",ret);
13579 +                       return ret;
13580 +               }
13581 +
13582 +               /* fill in the specifics */
13583 +               candidate->addr.sin_family      = AF_INET;
13584 +               candidate->addr.sin_port        = x_port;
13585 +               candidate->addr.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
13586 +               candidate->in_epoch             = x_epoch;
13587 +               candidate->out_epoch            = x_epoch;
13588 +               candidate->in_clientflag        = RXRPC_CLIENT_INITIATED;
13589 +               candidate->out_clientflag       = 0;
13590 +               candidate->conn_id              = x_connid;
13591 +               candidate->service_id           = x_servid;
13592 +               candidate->security_ix          = x_secix;
13593 +       }
13594 +
13595 +       /* search the active list again, just in case it appeared whilst we were busy */
13596 +       write_lock(&peer->conn_lock);
13597 +       list_for_each(_p,&peer->conn_active) {
13598 +               conn = list_entry(_p,struct rxrpc_connection,link);
13599 +               if (conn->addr.sin_port         == x_port       &&
13600 +                   conn->in_epoch              == x_epoch      &&
13601 +                   conn->conn_id               == x_connid     &&
13602 +                   conn->security_ix           == x_secix      &&
13603 +                   conn->service_id            == x_servid     && 
13604 +                   conn->in_clientflag         == x_clflag)
13605 +                       goto found_active_second_chance;
13606 +       }
13607 +
13608 +       /* search the transport's graveyard list */
13609 +       spin_lock(&peer->conn_gylock);
13610 +       list_for_each(_p,&peer->conn_graveyard) {
13611 +               conn = list_entry(_p,struct rxrpc_connection,link);
13612 +               if (conn->addr.sin_port         == x_port       &&
13613 +                   conn->in_epoch              == x_epoch      &&
13614 +                   conn->conn_id               == x_connid     &&
13615 +                   conn->security_ix           == x_secix      &&
13616 +                   conn->service_id            == x_servid     && 
13617 +                   conn->in_clientflag         == x_clflag)
13618 +                       goto found_in_graveyard;
13619 +       }
13620 +       spin_unlock(&peer->conn_gylock);
13621 +
13622 +       /* outbound connections aren't created here */
13623 +       if (!x_clflag) {
13624 +               write_unlock(&peer->conn_lock);
13625 +               _leave(" = -ENOENT");
13626 +               return -ENOENT;
13627 +       }
13628 +
13629 +       /* we can now add the new candidate to the list */
13630 +       rxrpc_get_peer(peer);
13631 +       conn = candidate;
13632 +       candidate = NULL;
13633 +       atomic_inc(&peer->conn_count);
13634 +       fresh = 1;
13635 +
13636 + make_active:
13637 +       list_add_tail(&conn->link,&peer->conn_active);
13638 +
13639 + success_uwfree:
13640 +       write_unlock(&peer->conn_lock);
13641 +
13642 +       if (candidate) {
13643 +               __RXACCT(atomic_dec(&rxrpc_connection_count));
13644 +               kfree(candidate);
13645 +       }
13646 +
13647 +       if (fresh) {
13648 +               down_write(&rxrpc_conns_sem);
13649 +               list_add_tail(&conn->proc_link,&rxrpc_conns);
13650 +               up_write(&rxrpc_conns_sem);
13651 +       }
13652 +
13653 + success:
13654 +       *_conn = conn;
13655 +       _leave(" = 0 (%p)",conn);
13656 +       return 0;
13657 +
13658 +       /* handle the connection being found in the active list straight off */
13659 + found_active:
13660 +       rxrpc_get_connection(conn);
13661 +       read_unlock(&peer->conn_lock);
13662 +       goto success;
13663 +
13664 +       /* handle resurrecting a connection from the graveyard */
13665 + found_in_graveyard:
13666 +       rxrpc_get_peer(peer);
13667 +       rxrpc_get_connection(conn);
13668 +       rxrpc_krxtimod_del_timer(&conn->timeout);
13669 +       list_del_init(&conn->link);
13670 +       spin_unlock(&peer->conn_gylock);
13671 +       goto make_active;
13672 +
13673 +       /* handle finding the connection on the second time through the active list */
13674 + found_active_second_chance:
13675 +       rxrpc_get_connection(conn);
13676 +       goto success_uwfree;
13677 +
13678 +} /* end rxrpc_connection_lookup() */
13679 +
13680 +/*****************************************************************************/
13681 +/*
13682 + * finish using a connection record
13683 + * - it will be transferred to the peer's connection graveyard when refcount reaches 0
13684 + */
13685 +void rxrpc_put_connection(struct rxrpc_connection *conn)
13686 +{
13687 +       struct rxrpc_peer *peer = conn->peer;
13688 +
13689 +       _enter("%p{u=%d p=%hu}",conn,atomic_read(&conn->usage),ntohs(conn->addr.sin_port));
13690 +
13691 +       /* sanity check */
13692 +       if (atomic_read(&conn->usage)<=0)
13693 +               BUG();
13694 +
13695 +       spin_lock(&peer->conn_gylock);
13696 +       if (likely(!atomic_dec_and_test(&conn->usage))) {
13697 +               spin_unlock(&peer->conn_gylock);
13698 +               _leave("");
13699 +               return;
13700 +       }
13701 +
13702 +       /* move to graveyard queue */
13703 +       list_del(&conn->link);
13704 +       list_add_tail(&conn->link,&peer->conn_graveyard);
13705 +
13706 +       /* discard in 20 secs */
13707 +       rxrpc_krxtimod_add_timer(&conn->timeout,20*HZ);
13708 +
13709 +       spin_unlock(&peer->conn_gylock);
13710 +
13711 +       rxrpc_put_peer(conn->peer);
13712 +
13713 +       _leave(" [killed]");
13714 +} /* end rxrpc_put_connection() */
13715 +
13716 +/*****************************************************************************/
13717 +/*
13718 + * free a connection record
13719 + */
13720 +void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
13721 +{
13722 +       struct rxrpc_peer *peer;
13723 +
13724 +       _enter("%p{u=%d p=%hu}",conn,atomic_read(&conn->usage),ntohs(conn->addr.sin_port));
13725 +
13726 +       peer = conn->peer;
13727 +
13728 +       if (atomic_read(&conn->usage)<0)
13729 +               BUG();
13730 +
13731 +       /* remove from graveyard if still dead */
13732 +       spin_lock(&peer->conn_gylock);
13733 +       if (atomic_read(&conn->usage)==0) {
13734 +               list_del_init(&conn->link);
13735 +       }
13736 +       else {
13737 +               conn = NULL;
13738 +       }
13739 +       spin_unlock(&peer->conn_gylock);
13740 +
13741 +       if (!conn) {
13742 +               _leave("");
13743 +               return; /* resurrected */
13744 +       }
13745 +
13746 +       _debug("--- Destroying Connection %p ---",conn);
13747 +
13748 +       down_write(&rxrpc_conns_sem);
13749 +       list_del(&conn->proc_link);
13750 +       up_write(&rxrpc_conns_sem);
13751 +
13752 +       __RXACCT(atomic_dec(&rxrpc_connection_count));
13753 +       kfree(conn);
13754 +
13755 +       /* if the graveyard is now empty, wake up anyone waiting for that */
13756 +       if (atomic_dec_and_test(&peer->conn_count))
13757 +               wake_up(&peer->conn_gy_waitq);
13758 +
13759 +       _leave(" [destroyed]");
13760 +} /* end rxrpc_conn_do_timeout() */
13761 +
13762 +/*****************************************************************************/
13763 +/*
13764 + * clear all connection records from a peer endpoint
13765 + */
13766 +void rxrpc_conn_clearall(struct rxrpc_peer *peer)
13767 +{
13768 +       DECLARE_WAITQUEUE(myself,current);
13769 +
13770 +       struct rxrpc_connection *conn;
13771 +       int err;
13772 +
13773 +       _enter("%p",peer);
13774 +
13775 +       /* there shouldn't be any active conns remaining */
13776 +       if (!list_empty(&peer->conn_active))
13777 +               BUG();
13778 +
13779 +       /* manually timeout all conns in the graveyard */
13780 +       spin_lock(&peer->conn_gylock);
13781 +       while (!list_empty(&peer->conn_graveyard)) {
13782 +               conn = list_entry(peer->conn_graveyard.next,struct rxrpc_connection,link);
13783 +               err = rxrpc_krxtimod_del_timer(&conn->timeout);
13784 +               spin_unlock(&peer->conn_gylock);
13785 +
13786 +               if (err==0)
13787 +                       rxrpc_conn_do_timeout(conn);
13788 +
13789 +               spin_lock(&peer->conn_gylock);
13790 +       }
13791 +       spin_unlock(&peer->conn_gylock);
13792 +
13793 +       /* wait for the conn graveyard to be completely cleared */
13794 +       set_current_state(TASK_UNINTERRUPTIBLE);
13795 +       add_wait_queue(&peer->conn_gy_waitq,&myself);
13796 +
13797 +       while (atomic_read(&peer->conn_count)!=0) {
13798 +               schedule();
13799 +               set_current_state(TASK_UNINTERRUPTIBLE);
13800 +       }
13801 +
13802 +       remove_wait_queue(&peer->conn_gy_waitq,&myself);
13803 +       set_current_state(TASK_RUNNING);
13804 +
13805 +       _leave("");
13806 +
13807 +} /* end rxrpc_conn_clearall() */
13808 +
13809 +/*****************************************************************************/
13810 +/*
13811 + * allocate and prepare a message for sending out through the transport endpoint
13812 + */
13813 +int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
13814 +                     struct rxrpc_call *call,
13815 +                     u8 type,
13816 +                     int dcount,
13817 +                     struct iovec diov[],
13818 +                     int alloc_flags,
13819 +                     struct rxrpc_message **_msg)
13820 +{
13821 +       struct rxrpc_message *msg;
13822 +       int loop;
13823 +
13824 +       _enter("%p{%d},%p,%u",conn,ntohs(conn->addr.sin_port),call,type);
13825 +
13826 +       if (dcount>3) {
13827 +               _leave(" = -EINVAL");
13828 +               return -EINVAL;
13829 +       }
13830 +
13831 +       msg = kmalloc(sizeof(struct rxrpc_message),alloc_flags);
13832 +       if (!msg) {
13833 +               _leave(" = -ENOMEM");
13834 +               return -ENOMEM;
13835 +       }
13836 +
13837 +       memset(msg,0,sizeof(*msg));
13838 +       atomic_set(&msg->usage,1);
13839 +
13840 +       INIT_LIST_HEAD(&msg->link);
13841 +
13842 +       msg->state = RXRPC_MSG_PREPARED;
13843 +
13844 +       msg->hdr.epoch          = conn->out_epoch;
13845 +       msg->hdr.cid            = conn->conn_id | (call ? call->chan_ix : 0);
13846 +       msg->hdr.callNumber     = call ? call->call_id : 0;
13847 +       msg->hdr.type           = type;
13848 +       msg->hdr.flags          = conn->out_clientflag;
13849 +       msg->hdr.securityIndex  = conn->security_ix;
13850 +       msg->hdr.serviceId      = conn->service_id;
13851 +
13852 +       /* generate sequence numbers for data packets */
13853 +       if (call) {
13854 +               switch (type) {
13855 +               case RXRPC_PACKET_TYPE_DATA:
13856 +                       msg->seq = ++call->snd_seq_count;
13857 +                       msg->hdr.seq = htonl(msg->seq);
13858 +                       break;
13859 +               case RXRPC_PACKET_TYPE_ACK:
13860 +                       /* ACK sequence numbers are complicated. The following may be wrong:
13861 +                        * - jumbo packet ACKs should have a seq number
13862 +                        * - normal ACKs should not
13863 +                        */
13864 +               default:
13865 +                       break;
13866 +               }
13867 +       }
13868 +
13869 +       msg->dcount = dcount + 1;
13870 +       msg->dsize = sizeof(msg->hdr);
13871 +       msg->data[0].iov_len = sizeof(msg->hdr);
13872 +       msg->data[0].iov_base = &msg->hdr;
13873 +
13874 +       for (loop=0; loop<dcount; loop++) {
13875 +               msg->dsize += diov[loop].iov_len;
13876 +               msg->data[loop+1].iov_len  = diov[loop].iov_len;
13877 +               msg->data[loop+1].iov_base = diov[loop].iov_base;
13878 +       }
13879 +
13880 +       __RXACCT(atomic_inc(&rxrpc_message_count));
13881 +       *_msg = msg;
13882 +       _leave(" = 0 (%p) #%d",msg,atomic_read(&rxrpc_message_count));
13883 +       return 0;
13884 +} /* end rxrpc_conn_newmsg() */
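+
+/*****************************************************************************/
+/*
+ * illustrative sketch only: how the routine above and rxrpc_conn_sendmsg()
+ * below fit together for a message carrying one payload iovec
+ * - DATA packets in this file are normally built through
+ *   rxrpc_call_write_data() and rxrpc_call_flush() so that they also land on
+ *   the pending-ACK queue; this only shows the call shape
+ * - the payload buffer and function name are placeholders
+ */
+#if 0
+static int example_send_raw(struct rxrpc_connection *conn,
+                           struct rxrpc_call *call,
+                           void *payload, size_t len)
+{
+       struct rxrpc_message *msg;
+       struct iovec diov[1];
+       int ret;
+
+       diov[0].iov_base = payload;
+       diov[0].iov_len  = len;
+
+       /* slot 0 of the message is reserved for the wire header; the payload
+        * iovec becomes slot 1 */
+       ret = rxrpc_conn_newmsg(conn,call,RXRPC_PACKET_TYPE_DATA,
+                               1,diov,GFP_KERNEL,&msg);
+       if (ret<0)
+               return ret;
+
+       msg->hdr.flags |= RXRPC_LAST_PACKET;
+
+       ret = rxrpc_conn_sendmsg(conn,msg);
+       rxrpc_put_message(msg);
+       return ret;
+}
+#endif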
13885 +
13886 +/*****************************************************************************/
13887 +/*
13888 + * free a message
13889 + */
13890 +void __rxrpc_put_message(struct rxrpc_message *msg)
13891 +{
13892 +       int loop;
13893 +
13894 +       _enter("%p #%d",msg,atomic_read(&rxrpc_message_count));
13895 +
13896 +       if (msg->pkt) kfree_skb(msg->pkt);
13897 +       if (msg->conn) rxrpc_put_connection(msg->conn);
13898 +
13899 +       for (loop=0; loop<8; loop++)
13900 +               if (test_bit(loop,&msg->dfree))
13901 +                       kfree(msg->data[loop].iov_base);
13902 +
13903 +       __RXACCT(atomic_dec(&rxrpc_message_count));
13904 +       kfree(msg);
13905 +
13906 +       _leave("");
13907 +} /* end __rxrpc_put_message() */
13908 +
13909 +/*****************************************************************************/
13910 +/*
13911 + * send a message out through the transport endpoint
13912 + */
13913 +int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg)
13914 +{
13915 +       struct msghdr msghdr;
13916 +       mm_segment_t oldfs;
13917 +       int ret;
13918 +
13919 +       _enter("%p{%d}",conn,ntohs(conn->addr.sin_port));
13920 +
13921 +       /* fill in some fields in the header */
13922 +       spin_lock(&conn->lock);
13923 +       msg->hdr.serial = htonl(++conn->serial_counter);
13924 +       msg->rttdone = 0;
13925 +       spin_unlock(&conn->lock);
13926 +
13927 +       /* set up the message to be transmitted */
13928 +       msghdr.msg_name         = &conn->addr;
13929 +       msghdr.msg_namelen      = sizeof(conn->addr);
13930 +       msghdr.msg_iov          = msg->data;
13931 +       msghdr.msg_iovlen       = msg->dcount;
13932 +       msghdr.msg_control      = NULL;
13933 +       msghdr.msg_controllen   = 0;
13934 +       msghdr.msg_flags        = MSG_CONFIRM|MSG_DONTWAIT;
13935 +
13936 +       _net("Sending message type %d of %d bytes to %08x:%d",
13937 +            msg->hdr.type,
13938 +            msg->dsize,
13939 +            htonl(conn->addr.sin_addr.s_addr),
13940 +            htons(conn->addr.sin_port));
13941 +
13942 +       /* send the message */
13943 +       oldfs = get_fs();
13944 +       set_fs(KERNEL_DS);
13945 +       ret = sock_sendmsg(conn->trans->socket,&msghdr,msg->dsize);
13946 +       set_fs(oldfs);
13947 +
13948 +       if (ret<0) {
13949 +               msg->state = RXRPC_MSG_ERROR;
13950 +       }
13951 +       else {
13952 +               msg->state = RXRPC_MSG_SENT;
13953 +               ret = 0;
13954 +
13955 +               spin_lock(&conn->lock);
13956 +               msg->stamp = conn->atime = xtime;
13957 +               spin_unlock(&conn->lock);
13958 +       }
13959 +
13960 +       _leave(" = %d",ret);
13961 +
13962 +       return ret;
13963 +} /* end rxrpc_conn_sendmsg() */
13964 +
13965 +/*****************************************************************************/
13966 +/*
13967 + * deal with a subsequent call packet
13968 + */
13969 +int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
13970 +                                  struct rxrpc_call *call,
13971 +                                  struct rxrpc_message *msg)
13972 +{
13973 +       struct rxrpc_message *pmsg;
13974 +       struct list_head *_p;
13975 +       unsigned cix, seq;
13976 +       int ret = 0;
13977 +
13978 +       _enter("%p,%p,%p",conn,call,msg);
13979 +
13980 +       if (!call) {
13981 +               cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
13982 +
13983 +               spin_lock(&conn->lock);
13984 +               call = conn->channels[cix];
13985 +
13986 +               if (!call || call->call_id != msg->hdr.callNumber) {
13987 +                       spin_unlock(&conn->lock);
13988 +                       rxrpc_trans_immediate_abort(conn->trans,msg,-ENOENT);
13989 +                       goto out;
13990 +               }
13991 +               else {
13992 +                       rxrpc_get_call(call);
13993 +                       spin_unlock(&conn->lock);
13994 +               }
13995 +       }
13996 +       else {
13997 +               rxrpc_get_call(call);
13998 +       }
13999 +
14000 +       _proto("Received packet %%%u [%u] on call %hu:%u:%u",
14001 +              htonl(msg->hdr.serial),
14002 +              htonl(msg->hdr.seq),
14003 +              htons(msg->hdr.serviceId),
14004 +              htonl(conn->conn_id),
14005 +              htonl(call->call_id));
14006 +
14007 +       call->pkt_rcv_count++;
14008 +
14009 +       if (msg->pkt->dst && msg->pkt->dst->dev)
14010 +               conn->peer->if_mtu = msg->pkt->dst->dev->mtu - msg->pkt->dst->dev->hard_header_len;
14011 +
14012 +       /* queue on the call in seq order */
14013 +       rxrpc_get_message(msg);
14014 +       seq = msg->seq;
14015 +
14016 +       spin_lock(&call->lock);
14017 +       list_for_each(_p,&call->rcv_receiveq) {
14018 +               pmsg = list_entry(_p,struct rxrpc_message,link);
14019 +               if (pmsg->seq>seq)
14020 +                       break;
14021 +       }
14022 +       list_add_tail(&msg->link,_p);
14023 +
14024 +       /* reset the activity timeout */
14025 +       call->flags |= RXRPC_CALL_RCV_PKT;
14026 +       mod_timer(&call->rcv_timeout,jiffies + rxrpc_call_rcv_timeout * HZ);
14027 +
14028 +       spin_unlock(&call->lock);
14029 +
14030 +       rxrpc_krxiod_queue_call(call);
14031 +
14032 +       rxrpc_put_call(call);
14033 + out:
14034 +       _leave(" = %d",ret);
14035 +
14036 +       return ret;
14037 +} /* end rxrpc_conn_receive_call_packet() */
14038 +
14039 +/*****************************************************************************/
14040 +/*
14041 + * handle an ICMP error being applied to a connection
14042 + */
14043 +void rxrpc_conn_handle_error(struct rxrpc_connection *conn, int local, int errno)
14044 +{
14045 +       struct rxrpc_call *calls[4];
14046 +       int loop;
14047 +
14048 +       _enter("%p{%d},%d",conn,ntohs(conn->addr.sin_port),errno);
14049 +
14050 +       /* get a ref to all my calls in one go */
14051 +       memset(calls,0,sizeof(calls));
14052 +       spin_lock(&conn->lock);
14053 +
14054 +       for (loop=3; loop>=0; loop--) {
14055 +               if (conn->channels[loop]) {
14056 +                       calls[loop] = conn->channels[loop];
14057 +                       rxrpc_get_call(calls[loop]);
14058 +               }
14059 +       }
14060 +
14061 +       spin_unlock(&conn->lock);
14062 +
14063 +       /* now kick them all */
14064 +       for (loop=3; loop>=0; loop--) {
14065 +               if (calls[loop]) {
14066 +                       rxrpc_call_handle_error(calls[loop],local,errno);
14067 +                       rxrpc_put_call(calls[loop]);
14068 +               }
14069 +       }
14070 +
14071 +       _leave("");
14072 +} /* end rxrpc_conn_handle_error() */
14073 diff -urNp linux-5240/net/rxrpc/internal.h linux-5250/net/rxrpc/internal.h
14074 --- linux-5240/net/rxrpc/internal.h     1970-01-01 01:00:00.000000000 +0100
14075 +++ linux-5250/net/rxrpc/internal.h     
14076 @@ -0,0 +1,96 @@
14077 +/* internal.h: internal Rx RPC stuff
14078 + *
14079 + * Copyright (c) 2002   David Howells (dhowells@redhat.com).
14080 + */
14081 +
14082 +#ifndef RXRPC_INTERNAL_H
14083 +#define RXRPC_INTERNAL_H
14084 +
14085 +#include <linux/compiler.h>
14086 +#include <linux/kernel.h>
14087 +
14088 +/*
14089 + * debug accounting
14090 + */
14091 +#if 1
14092 +#define __RXACCT_DECL(X) X
14093 +#define __RXACCT(X) do { X; } while(0) 
14094 +#else
14095 +#define __RXACCT_DECL(X)
14096 +#define __RXACCT(X) do { } while(0)
14097 +#endif
14098 +
14099 +__RXACCT_DECL(extern atomic_t rxrpc_transport_count);
14100 +__RXACCT_DECL(extern atomic_t rxrpc_peer_count);
14101 +__RXACCT_DECL(extern atomic_t rxrpc_connection_count);
14102 +__RXACCT_DECL(extern atomic_t rxrpc_call_count);
14103 +__RXACCT_DECL(extern atomic_t rxrpc_message_count);
14104 +
14105 +/*
14106 + * debug tracing
14107 + */
14108 +#define kenter(FMT,...)        printk("==> %s("FMT")\n",__FUNCTION__,##__VA_ARGS__)
14109 +#define kleave(FMT,...)        printk("<== %s()"FMT"\n",__FUNCTION__,##__VA_ARGS__)
14110 +#define kdebug(FMT,...)        printk("    "FMT"\n",##__VA_ARGS__)
14111 +#define kproto(FMT,...)        printk("### "FMT"\n",##__VA_ARGS__)
14112 +#define knet(FMT,...)  printk("    "FMT"\n",##__VA_ARGS__)
14113 +
14114 +#if 0
14115 +#define _enter(FMT,...)        kenter(FMT,##__VA_ARGS__)
14116 +#define _leave(FMT,...)        kleave(FMT,##__VA_ARGS__)
14117 +#define _debug(FMT,...)        kdebug(FMT,##__VA_ARGS__)
14118 +#define _proto(FMT,...)        kproto(FMT,##__VA_ARGS__)
14119 +#define _net(FMT,...)  knet(FMT,##__VA_ARGS__)
14120 +#else
14121 +#define _enter(FMT,...)        do { if (rxrpc_ktrace) kenter(FMT,##__VA_ARGS__); } while(0)
14122 +#define _leave(FMT,...)        do { if (rxrpc_ktrace) kleave(FMT,##__VA_ARGS__); } while(0)
14123 +#define _debug(FMT,...)        do { if (rxrpc_kdebug) kdebug(FMT,##__VA_ARGS__); } while(0)
14124 +#define _proto(FMT,...)        do { if (rxrpc_kproto) kproto(FMT,##__VA_ARGS__); } while(0)
14125 +#define _net(FMT,...)  do { if (rxrpc_knet)   knet  (FMT,##__VA_ARGS__); } while(0)
14126 +#endif
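+
+/*
+ * for example, with rxrpc_ktrace set, the _enter() at the top of
+ * rxrpc_put_connection() expands to roughly
+ *
+ *     printk("==> %s(%p{u=%d p=%hu})\n", __FUNCTION__, conn, ...);
+ *
+ * producing "==> rxrpc_put_connection(...)" lines in the kernel log, with
+ * _leave() emitting the matching "<== rxrpc_put_connection()..." lines
+ */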
14127 +
14128 +/*
14129 + * call.c
14130 + */
14131 +extern struct list_head rxrpc_calls;
14132 +extern struct rw_semaphore rxrpc_calls_sem;
14133 +
14134 +/*
14135 + * connection.c
14136 + */
14137 +extern struct list_head rxrpc_conns;
14138 +extern struct rw_semaphore rxrpc_conns_sem;
14139 +
14140 +extern void rxrpc_conn_do_timeout(struct rxrpc_connection *conn);
14141 +extern void rxrpc_conn_clearall(struct rxrpc_peer *peer);
14142 +
14143 +/*
14144 + * peer.c
14145 + */
14146 +extern struct list_head rxrpc_peers;
14147 +extern struct rw_semaphore rxrpc_peers_sem;
14148 +
14149 +extern void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
14150 +                                    struct rxrpc_message *msg,
14151 +                                    struct rxrpc_message *resp);
14152 +
14153 +extern void rxrpc_peer_clearall(struct rxrpc_transport *trans);
14154 +
14155 +extern void rxrpc_peer_do_timeout(struct rxrpc_peer *peer);
14156 +
14157 +
14158 +/*
14159 + * proc.c
14160 + */
14161 +#ifdef CONFIG_PROC_FS
14162 +extern int rxrpc_proc_init(void);
14163 +extern void rxrpc_proc_cleanup(void);
14164 +#endif
14165 +
14166 +/*
14167 + * transport.c
14168 + */
14169 +extern struct list_head rxrpc_proc_transports;
14170 +extern struct rw_semaphore rxrpc_proc_transports_sem;
14171 +
14172 +#endif /* RXRPC_INTERNAL_H */
14173 diff -urNp linux-5240/net/rxrpc/krxiod.c linux-5250/net/rxrpc/krxiod.c
14174 --- linux-5240/net/rxrpc/krxiod.c       1970-01-01 01:00:00.000000000 +0100
14175 +++ linux-5250/net/rxrpc/krxiod.c       
14176 @@ -0,0 +1,268 @@
14177 +/* krxiod.c: Rx I/O daemon
14178 + *
14179 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
14180 + * Written by David Howells (dhowells@redhat.com)
14181 + *
14182 + * This program is free software; you can redistribute it and/or
14183 + * modify it under the terms of the GNU General Public License
14184 + * as published by the Free Software Foundation; either version
14185 + * 2 of the License, or (at your option) any later version.
14186 + */
14187 +
14188 +#include <linux/version.h>
14189 +#include <linux/sched.h>
14190 +#include <linux/completion.h>
14191 +#include <linux/spinlock.h>
14192 +#include <linux/init.h>
14193 +#include <rxrpc/krxiod.h>
14194 +#include <rxrpc/transport.h>
14195 +#include <rxrpc/peer.h>
14196 +#include <rxrpc/call.h>
14197 +#include "internal.h"
14198 +
14199 +static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);
14200 +static DECLARE_COMPLETION(rxrpc_krxiod_dead);
14201 +
14202 +static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);
14203 +
14204 +static LIST_HEAD(rxrpc_krxiod_transportq);
14205 +static spinlock_t rxrpc_krxiod_transportq_lock = SPIN_LOCK_UNLOCKED;
14206 +
14207 +static LIST_HEAD(rxrpc_krxiod_callq);
14208 +static spinlock_t rxrpc_krxiod_callq_lock = SPIN_LOCK_UNLOCKED;
14209 +
14210 +static volatile int rxrpc_krxiod_die;
14211 +
14212 +/*****************************************************************************/
14213 +/*
14214 + * Rx I/O daemon
14215 + */
14216 +static int rxrpc_krxiod(void *arg)
14217 +{
14218 +       DECLARE_WAITQUEUE(krxiod,current);
14219 +
14220 +       siginfo_t sinfo;
14221 +
14222 +       printk("Started krxiod %d\n",current->pid);
14223 +       strcpy(current->comm,"krxiod");
14224 +
14225 +       daemonize();
14226 +
14227 +       /* only certain signals are of interest */
14228 +       spin_lock_irq(&current->sigmask_lock);
14229 +       siginitsetinv(&current->blocked,0);
14230 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
14231 +       recalc_sigpending();
14232 +#else
14233 +       recalc_sigpending(current);
14234 +#endif
14235 +       spin_unlock_irq(&current->sigmask_lock);
14236 +
14237 +       /* loop around waiting for work to do */
14238 +       do {
14239 +               /* wait for work or to be told to exit */
14240 +               _debug("### Begin Wait");
14241 +               if (!atomic_read(&rxrpc_krxiod_qcount)) {
14242 +                       set_current_state(TASK_INTERRUPTIBLE);
14243 +
14244 +                       add_wait_queue(&rxrpc_krxiod_sleepq,&krxiod);
14245 +
14246 +                       for (;;) {
14247 +                               set_current_state(TASK_INTERRUPTIBLE);
14248 +                               if (atomic_read(&rxrpc_krxiod_qcount) ||
14249 +                                   rxrpc_krxiod_die ||
14250 +                                   signal_pending(current))
14251 +                                       break;
14252 +
14253 +                               schedule();
14254 +                       }
14255 +
14256 +                       remove_wait_queue(&rxrpc_krxiod_sleepq,&krxiod);
14257 +                       set_current_state(TASK_RUNNING);
14258 +               }
14259 +               _debug("### End Wait");
14260 +
14261 +               /* do work if we've been given some to do */
14262 +               _debug("### Begin Work");
14263 +
14264 +               /* see if there's a transport in need of attention */
14265 +               if (!list_empty(&rxrpc_krxiod_transportq)) {
14266 +                       struct rxrpc_transport *trans = NULL;
14267 +
14268 +                       spin_lock_irq(&rxrpc_krxiod_transportq_lock);
14269 +
14270 +                       if (!list_empty(&rxrpc_krxiod_transportq)) {
14271 +                               trans = list_entry(rxrpc_krxiod_transportq.next,
14272 +                                                  struct rxrpc_transport,krxiodq_link);
14273 +                               list_del_init(&trans->krxiodq_link);
14274 +                               atomic_dec(&rxrpc_krxiod_qcount);
14275 +
14276 +                               /* make sure it hasn't gone away and doesn't go away */
14277 +                               if (atomic_read(&trans->usage)>0)
14278 +                                       rxrpc_get_transport(trans);
14279 +                               else
14280 +                                       trans = NULL;
14281 +                       }
14282 +
14283 +                       spin_unlock_irq(&rxrpc_krxiod_transportq_lock);
14284 +
14285 +                       if (trans) {
14286 +                               rxrpc_trans_receive_packet(trans);
14287 +                               rxrpc_put_transport(trans);
14288 +                       }
14289 +               }
14290 +
14291 +               /* see if there's a call in need of attention */
14292 +               if (!list_empty(&rxrpc_krxiod_callq)) {
14293 +                       struct rxrpc_call *call = NULL;
14294 +
14295 +                       spin_lock_irq(&rxrpc_krxiod_callq_lock);
14296 +
14297 +                       if (!list_empty(&rxrpc_krxiod_callq)) {
14298 +                               call = list_entry(rxrpc_krxiod_callq.next,
14299 +                                                  struct rxrpc_call,rcv_krxiodq_lk);
14300 +                               list_del_init(&call->rcv_krxiodq_lk);
14301 +                               atomic_dec(&rxrpc_krxiod_qcount);
14302 +
14303 +                               /* make sure it hasn't gone away and doesn't go away */
14304 +                               if (atomic_read(&call->usage)>0) {
14305 +                                       _debug("@@@ KRXIOD Begin Attend Call %p",call);
14306 +                                       rxrpc_get_call(call);
14307 +                               }
14308 +                               else {
14309 +                                       call = NULL;
14310 +                               }
14311 +                       }
14312 +
14313 +                       spin_unlock_irq(&rxrpc_krxiod_callq_lock);
14314 +
14315 +                       if (call) {
14316 +                               rxrpc_call_do_stuff(call);
14317 +                               rxrpc_put_call(call);
14318 +                               _debug("@@@ KRXIOD End Attend Call %p",call);
14319 +                       }
14320 +               }
14321 +
14322 +               _debug("### End Work");
14323 +
14324 +                /* discard pending signals */
14325 +                while (signal_pending(current)) {
14326 +                        spin_lock_irq(&current->sigmask_lock);
14327 +                        dequeue_signal(&current->blocked,&sinfo);
14328 +                        spin_unlock_irq(&current->sigmask_lock);
14329 +                }
14330 +
14331 +       } while (!rxrpc_krxiod_die);
14332 +
14333 +       /* and that's all */
14334 +       complete_and_exit(&rxrpc_krxiod_dead,0);
14335 +
14336 +} /* end rxrpc_krxiod() */
14337 +
14338 +/*****************************************************************************/
14339 +/*
14340 + * start up a krxiod daemon
14341 + */
14342 +int __init rxrpc_krxiod_init(void)
14343 +{
14344 +       return kernel_thread(rxrpc_krxiod,NULL,0);
14345 +
14346 +} /* end rxrpc_krxiod_init() */
14347 +
14348 +/*****************************************************************************/
14349 +/*
14350 + * kill the krxiod daemon and wait for it to complete
14351 + */
14352 +void rxrpc_krxiod_kill(void)
14353 +{
14354 +       rxrpc_krxiod_die = 1;
14355 +       wake_up_all(&rxrpc_krxiod_sleepq);
14356 +       wait_for_completion(&rxrpc_krxiod_dead);
14357 +
14358 +} /* end rxrpc_krxiod_kill() */
14359 +
14360 +/*****************************************************************************/
14361 +/*
14362 + * queue a transport for attention by krxiod
14363 + */
14364 +void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
14365 +{
14366 +       unsigned long flags;
14367 +
14368 +       _enter("");
14369 +
14370 +       if (list_empty(&trans->krxiodq_link)) {
14371 +               spin_lock_irqsave(&rxrpc_krxiod_transportq_lock,flags);
14372 +
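+               /* recheck under the lock: another CPU may have queued the transport
+                * between the unlocked test above and taking the lock
+                */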
14373 +               if (list_empty(&trans->krxiodq_link)) {
14374 +                       if (atomic_read(&trans->usage)>0) {
14375 +                               list_add_tail(&trans->krxiodq_link,&rxrpc_krxiod_transportq);
14376 +                               atomic_inc(&rxrpc_krxiod_qcount);
14377 +                       }
14378 +               }
14379 +
14380 +               spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock,flags);
14381 +               wake_up_all(&rxrpc_krxiod_sleepq);
14382 +       }
14383 +
14384 +       _leave("");
14385 +
14386 +} /* end rxrpc_krxiod_queue_transport() */
14387 +
14388 +/*****************************************************************************/
14389 +/*
14390 + * dequeue a transport from krxiod's attention queue
14391 + */
14392 +void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
14393 +{
14394 +       unsigned long flags;
14395 +
14396 +       _enter("");
14397 +
14398 +       spin_lock_irqsave(&rxrpc_krxiod_transportq_lock,flags);
14399 +       if (!list_empty(&trans->krxiodq_link)) {
14400 +               list_del_init(&trans->krxiodq_link);
14401 +               atomic_dec(&rxrpc_krxiod_qcount);
14402 +       }
14403 +       spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock,flags);
14404 +
14405 +       _leave("");
14406 +
14407 +} /* end rxrpc_krxiod_dequeue_transport() */
14408 +
14409 +/*****************************************************************************/
14410 +/*
14411 + * queue a call for attention by krxiod
14412 + */
14413 +void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
14414 +{
14415 +       unsigned long flags;
14416 +
14417 +       if (list_empty(&call->rcv_krxiodq_lk)) {
14418 +               spin_lock_irqsave(&rxrpc_krxiod_callq_lock,flags);
14419 +               if (atomic_read(&call->usage)>0) {
14420 +                       list_add_tail(&call->rcv_krxiodq_lk,&rxrpc_krxiod_callq);
14421 +                       atomic_inc(&rxrpc_krxiod_qcount);
14422 +               }
14423 +               spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock,flags);
14424 +       }
14425 +       wake_up_all(&rxrpc_krxiod_sleepq);
14426 +
14427 +} /* end rxrpc_krxiod_queue_call() */
14428 +
14429 +/*****************************************************************************/
14430 +/*
14431 + * dequeue a call from krxiod's attention queue
14432 + */
14433 +void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
14434 +{
14435 +       unsigned long flags;
14436 +
14437 +       spin_lock_irqsave(&rxrpc_krxiod_callq_lock,flags);
14438 +       if (!list_empty(&call->rcv_krxiodq_lk)) {
14439 +               list_del_init(&call->rcv_krxiodq_lk);
14440 +               atomic_dec(&rxrpc_krxiod_qcount);
14441 +       }
14442 +       spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock,flags);
14443 +
14444 +} /* end rxrpc_krxiod_dequeue_call() */
14445 diff -urNp linux-5240/net/rxrpc/krxsecd.c linux-5250/net/rxrpc/krxsecd.c
14446 --- linux-5240/net/rxrpc/krxsecd.c      1970-01-01 01:00:00.000000000 +0100
14447 +++ linux-5250/net/rxrpc/krxsecd.c      
14448 @@ -0,0 +1,283 @@
14449 +/* krxsecd.c: Rx security daemon
14450 + *
14451 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
14452 + * Written by David Howells (dhowells@redhat.com)
14453 + *
14454 + * This program is free software; you can redistribute it and/or
14455 + * modify it under the terms of the GNU General Public License
14456 + * as published by the Free Software Foundation; either version
14457 + * 2 of the License, or (at your option) any later version.
14458 + *
14459 + * This daemon deals with:
14460 + * - consulting the application as to whether inbound peers and calls should be authorised
14461 + * - generating security challenges for inbound connections
14462 + * - responding to security challenges on outbound connections
14463 + */
14464 +
14465 +#include <linux/version.h>
14466 +#include <linux/module.h>
14467 +#include <linux/sched.h>
14468 +#include <linux/completion.h>
14469 +#include <linux/spinlock.h>
14470 +#include <linux/init.h>
14471 +#include <rxrpc/krxsecd.h>
14472 +#include <rxrpc/transport.h>
14473 +#include <rxrpc/connection.h>
14474 +#include <rxrpc/message.h>
14475 +#include <rxrpc/peer.h>
14476 +#include <rxrpc/call.h>
14477 +#include <linux/udp.h>
14478 +#include <linux/ip.h>
14479 +#include <net/sock.h>
14480 +#include "internal.h"
14481 +
14482 +static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxsecd_sleepq);
14483 +static DECLARE_COMPLETION(rxrpc_krxsecd_dead);
14484 +static volatile int rxrpc_krxsecd_die;
14485 +
14486 +static atomic_t rxrpc_krxsecd_qcount;
14487 +
14488 +/* queue of unprocessed inbound messages with seqno #1 and RXRPC_CLIENT_INITIATED flag set */
14489 +static LIST_HEAD(rxrpc_krxsecd_initmsgq);
14490 +static spinlock_t rxrpc_krxsecd_initmsgq_lock = SPIN_LOCK_UNLOCKED;
14491 +
14492 +static void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg);
14493 +
14494 +/*****************************************************************************/
14495 +/*
14496 + * Rx security daemon
14497 + */
14498 +static int rxrpc_krxsecd(void *arg)
14499 +{
14500 +       DECLARE_WAITQUEUE(krxsecd,current);
14501 +
14502 +       siginfo_t sinfo;
14503 +       int die;
14504 +
14505 +       printk("Started krxsecd %d\n",current->pid);
14506 +       strcpy(current->comm,"krxsecd");
14507 +
14508 +       daemonize();
14509 +
14510 +       /* only certain signals are of interest */
14511 +       spin_lock_irq(&current->sigmask_lock);
14512 +       siginitsetinv(&current->blocked,0);
14513 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
14514 +       recalc_sigpending();
14515 +#else
14516 +       recalc_sigpending(current);
14517 +#endif
14518 +       spin_unlock_irq(&current->sigmask_lock);
14519 +
14520 +       /* loop around waiting for work to do */
14521 +       do {
14522 +               /* wait for work or to be told to exit */
14523 +               _debug("### Begin Wait");
14524 +               if (!atomic_read(&rxrpc_krxsecd_qcount)) {
14525 +                       set_current_state(TASK_INTERRUPTIBLE);
14526 +
14527 +                       add_wait_queue(&rxrpc_krxsecd_sleepq,&krxsecd);
14528 +
14529 +                       for (;;) {
14530 +                               set_current_state(TASK_INTERRUPTIBLE);
14531 +                               if (atomic_read(&rxrpc_krxsecd_qcount) ||
14532 +                                   rxrpc_krxsecd_die ||
14533 +                                   signal_pending(current))
14534 +                                       break;
14535 +
14536 +                               schedule();
14537 +                       }
14538 +
14539 +                       remove_wait_queue(&rxrpc_krxsecd_sleepq,&krxsecd);
14540 +                       set_current_state(TASK_RUNNING);
14541 +               }
14542 +               die = rxrpc_krxsecd_die;
14543 +               _debug("### End Wait");
14544 +
14545 +               /* see if there're incoming calls in need of authenticating */
14546 +               _debug("### Begin Inbound Calls");
14547 +
14548 +               if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
14549 +                       struct rxrpc_message *msg = NULL;
14550 +
14551 +                       spin_lock(&rxrpc_krxsecd_initmsgq_lock);
14552 +
14553 +                       if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
14554 +                               msg = list_entry(rxrpc_krxsecd_initmsgq.next,
14555 +                                                struct rxrpc_message,link);
14556 +                               list_del_init(&msg->link);
14557 +                               atomic_dec(&rxrpc_krxsecd_qcount);
14558 +                       }
14559 +
14560 +                       spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
14561 +
14562 +                       if (msg) {
14563 +                               rxrpc_krxsecd_process_incoming_call(msg);
14564 +                               rxrpc_put_message(msg);
14565 +                       }
14566 +               }
14567 +
14568 +               _debug("### End Inbound Calls");
14569 +
14570 +                /* discard pending signals */
14571 +                while (signal_pending(current)) {
14572 +                        spin_lock_irq(&current->sigmask_lock);
14573 +                        dequeue_signal(&current->blocked,&sinfo);
14574 +                        spin_unlock_irq(&current->sigmask_lock);
14575 +                }
14576 +
14577 +       } while (!die);
14578 +
14579 +       /* and that's all */
14580 +       complete_and_exit(&rxrpc_krxsecd_dead,0);
14581 +
14582 +} /* end rxrpc_krxsecd() */
14583 +
14584 +/*****************************************************************************/
14585 +/*
14586 + * start up a krxsecd daemon
14587 + */
14588 +int __init rxrpc_krxsecd_init(void)
14589 +{
14590 +       return kernel_thread(rxrpc_krxsecd,NULL,0);
14591 +
14592 +} /* end rxrpc_krxsecd_init() */
14593 +
14594 +/*****************************************************************************/
14595 +/*
14596 + * kill the krxsecd daemon and wait for it to complete
14597 + */
14598 +void rxrpc_krxsecd_kill(void)
14599 +{
14600 +       rxrpc_krxsecd_die = 1;
14601 +       wake_up_all(&rxrpc_krxsecd_sleepq);
14602 +       wait_for_completion(&rxrpc_krxsecd_dead);
14603 +
14604 +} /* end rxrpc_krxsecd_kill() */
14605 +
14606 +/*****************************************************************************/
14607 +/*
14608 + * clear all pending incoming calls for the specified transport
14609 + */
14610 +void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
14611 +{
14612 +       LIST_HEAD(tmp);
14613 +
14614 +       struct rxrpc_message *msg;
14615 +       struct list_head *_p, *_n;
14616 +
14617 +       _enter("%p",trans);
14618 +
14619 +       /* move all the messages for this transport onto a temp list */
14620 +       spin_lock(&rxrpc_krxsecd_initmsgq_lock);
14621 +
14622 +       list_for_each_safe(_p,_n,&rxrpc_krxsecd_initmsgq) {
14623 +               msg = list_entry(_p,struct rxrpc_message,link);
14624 +               if (msg->trans==trans) {
14625 +                       list_del(&msg->link);
14626 +                       list_add_tail(&msg->link,&tmp);
14627 +                       atomic_dec(&rxrpc_krxsecd_qcount);
14628 +               }
14629 +       }
14630 +
14631 +       spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
14632 +
14633 +       /* zap all messages on the temp list */
14634 +       while (!list_empty(&tmp)) {
14635 +               msg = list_entry(tmp.next,struct rxrpc_message,link);
14636 +               list_del_init(&msg->link);
14637 +               rxrpc_put_message(msg);
14638 +       }
14639 +
14640 +       _leave("");
14641 +} /* end rxrpc_krxsecd_clear_transport() */
14642 +
14643 +/*****************************************************************************/
14644 +/*
14645 + * queue a message on the incoming calls list
14646 + */
14647 +void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg)
14648 +{
14649 +       _enter("%p",msg);
14650 +
14651 +       /* queue for processing by krxsecd */
14652 +       spin_lock(&rxrpc_krxsecd_initmsgq_lock);
14653 +
14654 +       if (!rxrpc_krxsecd_die) {
14655 +               rxrpc_get_message(msg);
14656 +               list_add_tail(&msg->link,&rxrpc_krxsecd_initmsgq);
14657 +               atomic_inc(&rxrpc_krxsecd_qcount);
14658 +       }
14659 +
14660 +       spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
14661 +
14662 +       wake_up(&rxrpc_krxsecd_sleepq);
14663 +
14664 +       _leave("");
14665 +} /* end rxrpc_krxsecd_queue_incoming_call() */
14666 +
14667 +/*****************************************************************************/
14668 +/*
14669 + * process the initial message of an incoming call
14670 + */
14671 +static void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg)
14672 +{
14673 +       struct rxrpc_transport *trans = msg->trans;
14674 +       struct rxrpc_service *srv;
14675 +       struct rxrpc_call *call;
14676 +       struct list_head *_p;
14677 +       unsigned short sid;
14678 +       int ret;
14679 +
14680 +       _enter("%p{tr=%p}",msg,trans);
14681 +
14682 +       ret = rxrpc_incoming_call(msg->conn,msg,&call);
14683 +       if (ret<0)
14684 +               goto out;
14685 +
14686 +       /* find the matching service on the transport */
14687 +       sid = ntohs(msg->hdr.serviceId);
14688 +       srv = NULL;
14689 +
14690 +       spin_lock(&trans->lock);
14691 +       list_for_each(_p,&trans->services) {
14692 +               srv = list_entry(_p,struct rxrpc_service,link);
14693 +               if (srv->service_id==sid && try_inc_mod_count(srv->owner)) {
14694 +                       /* found a match (made sure it won't vanish) */
14695 +                       _debug("found service '%s'",srv->name);
14696 +                       call->owner = srv->owner;
14697 +                       break;
14698 +               }
14699 +       }
14700 +       spin_unlock(&trans->lock);
14701 +
14702 +       /* report the new connection
14703 +        * - the func must inc the call's usage count to keep it
14704 +        */
14705 +       ret = -ENOENT;
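+       /* note: _p only points back at the list head if no matching service was found above */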
14706 +       if (_p!=&trans->services) {
14707 +               /* attempt to accept the call */
14708 +               call->conn->service = srv;
14709 +               call->app_attn_func = srv->attn_func;
14710 +               call->app_error_func = srv->error_func;
14711 +               call->app_aemap_func = srv->aemap_func;
14712 +
14713 +               ret = srv->new_call(call);
14714 +
14715 +               /* send an abort if an error occurred */
14716 +               if (ret<0) {
14717 +                       rxrpc_call_abort(call,ret);
14718 +               }
14719 +               else {
14720 +                       /* formally receive and ACK the new packet */
14721 +                       ret = rxrpc_conn_receive_call_packet(call->conn,call,msg);
14722 +               }
14723 +       }
14724 +
14725 +       rxrpc_put_call(call);
14726 + out:
14727 +       if (ret<0)
14728 +               rxrpc_trans_immediate_abort(trans,msg,ret);
14729 +
14730 +       _leave(" (%d)",ret);
14731 +} /* end rxrpc_krxsecd_process_incoming_call() */
14732 diff -urNp linux-5240/net/rxrpc/krxtimod.c linux-5250/net/rxrpc/krxtimod.c
14733 --- linux-5240/net/rxrpc/krxtimod.c     1970-01-01 01:00:00.000000000 +0100
14734 +++ linux-5250/net/rxrpc/krxtimod.c     
14735 @@ -0,0 +1,216 @@
14736 +/* krxtimod.c: RXRPC timeout daemon
14737 + *
14738 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
14739 + * Written by David Howells (dhowells@redhat.com)
14740 + *
14741 + * This program is free software; you can redistribute it and/or
14742 + * modify it under the terms of the GNU General Public License
14743 + * as published by the Free Software Foundation; either version
14744 + * 2 of the License, or (at your option) any later version.
14745 + */
14746 +
14747 +#include <linux/version.h>
14748 +#include <linux/module.h>
14749 +#include <linux/init.h>
14750 +#include <linux/sched.h>
14751 +#include <linux/completion.h>
14752 +#include <rxrpc/rxrpc.h>
14753 +#include <rxrpc/krxtimod.h>
14754 +#include <asm/errno.h>
14755 +#include "internal.h"
14756 +
14757 +static DECLARE_COMPLETION(krxtimod_alive);
14758 +static DECLARE_COMPLETION(krxtimod_dead);
14759 +static DECLARE_WAIT_QUEUE_HEAD(krxtimod_sleepq);
14760 +static int krxtimod_die;
14761 +
14762 +static LIST_HEAD(krxtimod_list);
14763 +static spinlock_t krxtimod_lock = SPIN_LOCK_UNLOCKED;
14764 +
14765 +static int krxtimod(void *arg);
14766 +
14767 +/*****************************************************************************/
14768 +/*
14769 + * start the timeout daemon
14770 + */
14771 +int rxrpc_krxtimod_start(void)
14772 +{
14773 +       int ret;
14774 +
14775 +       ret = kernel_thread(krxtimod,NULL,0);
14776 +       if (ret<0)
14777 +               return ret;
14778 +
14779 +       wait_for_completion(&krxtimod_alive);
14780 +
14781 +       return ret;
14782 +} /* end rxrpc_krxtimod_start() */
14783 +
14784 +/*****************************************************************************/
14785 +/*
14786 + * stop the timeout daemon
14787 + */
14788 +void rxrpc_krxtimod_kill(void)
14789 +{
14790 +       /* get rid of my daemon */
14791 +       krxtimod_die = 1;
14792 +       wake_up(&krxtimod_sleepq);
14793 +       wait_for_completion(&krxtimod_dead);
14794 +
14795 +} /* end rxrpc_krxtimod_kill() */
14796 +
14797 +/*****************************************************************************/
14798 +/*
14799 + * timeout processing daemon
14800 + */
14801 +static int krxtimod(void *arg)
14802 +{
14803 +       DECLARE_WAITQUEUE(myself,current);
14804 +
14805 +       rxrpc_timer_t *timer;
14806 +
14807 +       printk("Started krxtimod %d\n",current->pid);
14808 +       strcpy(current->comm,"krxtimod");
14809 +
14810 +       daemonize();
14811 +
14812 +       complete(&krxtimod_alive);
14813 +
14814 +       /* only certain signals are of interest */
14815 +       spin_lock_irq(&current->sigmask_lock);
14816 +       siginitsetinv(&current->blocked,0);
14817 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
14818 +       recalc_sigpending();
14819 +#else
14820 +       recalc_sigpending(current);
14821 +#endif
14822 +       spin_unlock_irq(&current->sigmask_lock);
14823 +
14824 +       /* loop around looking for things to attend to */
14825 + loop:
14826 +       set_current_state(TASK_INTERRUPTIBLE);
14827 +       add_wait_queue(&krxtimod_sleepq,&myself);
14828 +
14829 +       for (;;) {
14830 +               unsigned long jif;
14831 +               signed long timeout;
14832 +
14833 +               /* deal with the server being asked to die */
14834 +               if (krxtimod_die) {
14835 +                       remove_wait_queue(&krxtimod_sleepq,&myself);
14836 +                       _leave("");
14837 +                       complete_and_exit(&krxtimod_dead,0);
14838 +               }
14839 +
14840 +               /* discard pending signals */
14841 +               while (signal_pending(current)) {
14842 +                       siginfo_t sinfo;
14843 +
14844 +                       spin_lock_irq(&current->sigmask_lock);
14845 +                       dequeue_signal(&current->blocked,&sinfo);
14846 +                       spin_unlock_irq(&current->sigmask_lock);
14847 +               }
14848 +
14849 +               /* work out the time to elapse before the next event */
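+               /* the timer queue is kept sorted by expiry time (see
+                * rxrpc_krxtimod_add_timer), so only the head entry need be examined
+                */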
14850 +               spin_lock(&krxtimod_lock);
14851 +               if (list_empty(&krxtimod_list)) {
14852 +                       timeout = MAX_SCHEDULE_TIMEOUT;
14853 +               }
14854 +               else {
14855 +                       timer = list_entry(krxtimod_list.next,rxrpc_timer_t,link);
14856 +                       timeout = timer->timo_jif;
14857 +                       jif = jiffies;
14858 +
14859 +                       if (time_before_eq(timeout,jif))
14860 +                               goto immediate;
14861 +                       else
14862 +                               timeout = (long)timeout - (long)jiffies;
14865 +               }
14866 +               spin_unlock(&krxtimod_lock);
14867 +
14868 +               schedule_timeout(timeout);
14869 +
14870 +               set_current_state(TASK_INTERRUPTIBLE);
14871 +       }
14872 +
14873 +       /* the thing on the front of the queue needs processing
14874 +        * - we come here with the lock held and timer pointing to the expired entry
14875 +        */
14876 + immediate:
14877 +       remove_wait_queue(&krxtimod_sleepq,&myself);
14878 +       set_current_state(TASK_RUNNING);
14879 +
14880 +       _debug("@@@ Begin Timeout of %p",timer);
14881 +
14882 +       /* dequeue the timer */
14883 +       list_del_init(&timer->link);
14884 +       spin_unlock(&krxtimod_lock);
14885 +
14886 +       /* call the timeout function */
14887 +       timer->ops->timed_out(timer);
14888 +
14889 +       _debug("@@@ End Timeout");
14890 +       goto loop;
14891 +
14892 +} /* end krxtimod() */
14893 +
14894 +/*****************************************************************************/
14895 +/*
14896 + * (re-)queue a timer
14897 + */
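+/* a minimal usage sketch (names are illustrative only; peer.c later in this patch
+ * does this for real with rxrpc_peer_timer_ops):
+ *
+ *     static void my_timed_out(rxrpc_timer_t *timer) { ... }
+ *     static const struct rxrpc_timer_ops my_timer_ops = { timed_out: my_timed_out };
+ *
+ *     rxrpc_timer_init(&obj->timeout,&my_timer_ops);
+ *     rxrpc_krxtimod_add_timer(&obj->timeout,100*HZ);
+ */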
14898 +void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout)
14899 +{
14900 +       struct list_head *_p;
14901 +       rxrpc_timer_t *ptimer;
14902 +
14903 +       _enter("%p,%lu",timer,timeout);
14904 +
14905 +       spin_lock(&krxtimod_lock);
14906 +
14907 +       list_del(&timer->link);
14908 +
14909 +       /* the timer was deferred or reset - put it back in the queue at the right place */
14910 +       timer->timo_jif = jiffies + timeout;
14911 +
14912 +       list_for_each(_p,&krxtimod_list) {
14913 +               ptimer = list_entry(_p,rxrpc_timer_t,link);
14914 +               if (time_before(timer->timo_jif,ptimer->timo_jif))
14915 +                       break;
14916 +       }
14917 +
14918 +       list_add_tail(&timer->link,_p); /* insert before stopping point */
14919 +
14920 +       spin_unlock(&krxtimod_lock);
14921 +
14922 +       wake_up(&krxtimod_sleepq);
14923 +
14924 +       _leave("");
14925 +} /* end rxrpc_krxtimod_add_timer() */
14926 +
14927 +/*****************************************************************************/
14928 +/*
14929 + * dequeue a timer
14930 + * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued
14931 + */
14932 +int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer)
14933 +{
14934 +       int ret = 0;
14935 +
14936 +       _enter("%p",timer);
14937 +
14938 +       spin_lock(&krxtimod_lock);
14939 +
14940 +       if (list_empty(&timer->link))
14941 +               ret = -ENOENT;
14942 +       else
14943 +               list_del_init(&timer->link);
14944 +
14945 +       spin_unlock(&krxtimod_lock);
14946 +
14947 +       wake_up(&krxtimod_sleepq);
14948 +
14949 +       _leave(" = %d",ret);
14950 +       return ret;
14951 +} /* end rxrpc_krxtimod_del_timer() */
14952 diff -urNp linux-5240/net/rxrpc/main.c linux-5250/net/rxrpc/main.c
14953 --- linux-5240/net/rxrpc/main.c 1970-01-01 01:00:00.000000000 +0100
14954 +++ linux-5250/net/rxrpc/main.c 
14955 @@ -0,0 +1,127 @@
14956 +/* main.c: Rx RPC interface
14957 + *
14958 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
14959 + * Written by David Howells (dhowells@redhat.com)
14960 + *
14961 + * This program is free software; you can redistribute it and/or
14962 + * modify it under the terms of the GNU General Public License
14963 + * as published by the Free Software Foundation; either version
14964 + * 2 of the License, or (at your option) any later version.
14965 + */
14966 +
14967 +#include <linux/module.h>
14968 +#include <linux/init.h>
14969 +#include <linux/sched.h>
14970 +#include <rxrpc/rxrpc.h>
14971 +#include <rxrpc/krxiod.h>
14972 +#include <rxrpc/krxsecd.h>
14973 +#include <rxrpc/krxtimod.h>
14974 +#include <rxrpc/transport.h>
14975 +#include <rxrpc/connection.h>
14976 +#include <rxrpc/call.h>
14977 +#include <rxrpc/message.h>
14978 +#include "internal.h"
14979 +
14980 +static int rxrpc_initialise(void);
14981 +static void rxrpc_cleanup(void);
14982 +
14983 +module_init(rxrpc_initialise);
14984 +module_exit(rxrpc_cleanup);
14985 +
14986 +MODULE_DESCRIPTION("Rx RPC implementation");
14987 +MODULE_AUTHOR("Red Hat, Inc.");
14988 +MODULE_LICENSE("GPL");
14989 +
14990 +u32 rxrpc_epoch;
14991 +
14992 +/*****************************************************************************/
14993 +/*
14994 + * initialise the Rx module
14995 + */
14996 +static int rxrpc_initialise(void)
14997 +{
14998 +       int ret;
14999 +
15000 +       /* my epoch value */
15001 +       rxrpc_epoch = htonl(xtime.tv_sec);
15002 +
15003 +       /* register the /proc interface */
15004 +#ifdef CONFIG_PROC_FS
15005 +       ret = rxrpc_proc_init();
15006 +       if (ret<0)
15007 +               return ret;
15008 +#endif
15009 +
15010 +       /* register the sysctl files */
15011 +#ifdef CONFIG_SYSCTL
15012 +       ret = rxrpc_sysctl_init();
15013 +       if (ret<0)
15014 +               goto error_proc;
15015 +#endif
15016 +
15017 +       /* start the krxtimod daemon */
15018 +       ret = rxrpc_krxtimod_start();
15019 +       if (ret<0)
15020 +               goto error_sysctl;
15021 +
15022 +       /* start the krxiod daemon */
15023 +       ret = rxrpc_krxiod_init();
15024 +       if (ret<0)
15025 +               goto error_krxtimod;
15026 +
15027 +       /* start the krxsecd daemon */
15028 +       ret = rxrpc_krxsecd_init();
15029 +       if (ret<0)
15030 +               goto error_krxiod;
15031 +
15032 +       kdebug("\n\n");
15033 +
15034 +       return 0;
15035 +
15036 + error_krxiod:
15037 +       rxrpc_krxiod_kill();
15038 + error_krxtimod:
15039 +       rxrpc_krxtimod_kill();
15040 + error_sysctl:
15041 +#ifdef CONFIG_SYSCTL
15042 +       rxrpc_sysctl_cleanup();
15043 +#endif
15044 + error_proc:
15045 +#ifdef CONFIG_PROC_FS
15046 +       rxrpc_proc_cleanup();
15047 +#endif
15048 +       return ret;
15049 +} /* end rxrpc_initialise() */
15050 +
15051 +/*****************************************************************************/
15052 +/*
15053 + * clean up the Rx module
15054 + */
15055 +static void rxrpc_cleanup(void)
15056 +{
15057 +       kenter("");
15058 +
15059 +       __RXACCT(printk("Outstanding Messages   : %d\n",atomic_read(&rxrpc_message_count)));
15060 +       __RXACCT(printk("Outstanding Calls      : %d\n",atomic_read(&rxrpc_call_count)));
15061 +       __RXACCT(printk("Outstanding Connections: %d\n",atomic_read(&rxrpc_connection_count)));
15062 +       __RXACCT(printk("Outstanding Peers      : %d\n",atomic_read(&rxrpc_peer_count)));
15063 +       __RXACCT(printk("Outstanding Transports : %d\n",atomic_read(&rxrpc_transport_count)));
15064 +
15065 +       rxrpc_krxsecd_kill();
15066 +       rxrpc_krxiod_kill();
15067 +       rxrpc_krxtimod_kill();
15068 +#ifdef CONFIG_SYSCTL
15069 +       rxrpc_sysctl_cleanup();
15070 +#endif
15071 +#ifdef CONFIG_PROC_FS
15072 +       rxrpc_proc_cleanup();
15073 +#endif
15074 +
15075 +       __RXACCT(printk("Outstanding Messages   : %d\n",atomic_read(&rxrpc_message_count)));
15076 +       __RXACCT(printk("Outstanding Calls      : %d\n",atomic_read(&rxrpc_call_count)));
15077 +       __RXACCT(printk("Outstanding Connections: %d\n",atomic_read(&rxrpc_connection_count)));
15078 +       __RXACCT(printk("Outstanding Peers      : %d\n",atomic_read(&rxrpc_peer_count)));
15079 +       __RXACCT(printk("Outstanding Transports : %d\n",atomic_read(&rxrpc_transport_count)));
15080 +
15081 +       kleave();
15082 +} /* end rxrpc_cleanup() */
15083 diff -urNp linux-5240/net/rxrpc/Makefile linux-5250/net/rxrpc/Makefile
15084 --- linux-5240/net/rxrpc/Makefile       1970-01-01 01:00:00.000000000 +0100
15085 +++ linux-5250/net/rxrpc/Makefile       
15086 @@ -0,0 +1,31 @@
15087 +#
15088 +# Makefile for Linux kernel Rx RPC
15089 +#
15090 +
15091 +export-objs := rxrpc_syms.o
15092 +
15093 +rxrpc-objs := \
15094 +       call.o \
15095 +       connection.o \
15096 +       krxiod.o \
15097 +       krxsecd.o \
15098 +       krxtimod.o \
15099 +       main.o \
15100 +       peer.o \
15101 +       rxrpc_syms.o \
15102 +       transport.o
15103 +
15104 +ifeq ($(CONFIG_PROC_FS),y)
15105 +rxrpc-objs += proc.o
15106 +endif
15107 +ifeq ($(CONFIG_SYSCTL),y)
15108 +rxrpc-objs += sysctl.o
15109 +endif
15110 +
15111 +obj-m  := rxrpc.o
15112 +
15113 +# superfluous for 2.5, but needed for 2.4..
15114 +rxrpc.o: $(rxrpc-objs)
15115 +       $(LD) -r -o $@ $(rxrpc-objs)
15116 +
15117 +include $(TOPDIR)/Rules.make
15118 diff -urNp linux-5240/net/rxrpc/peer.c linux-5250/net/rxrpc/peer.c
15119 --- linux-5240/net/rxrpc/peer.c 1970-01-01 01:00:00.000000000 +0100
15120 +++ linux-5250/net/rxrpc/peer.c 
15121 @@ -0,0 +1,380 @@
15122 +/* peer.c: Rx RPC peer management
15123 + *
15124 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
15125 + * Written by David Howells (dhowells@redhat.com)
15126 + *
15127 + * This program is free software; you can redistribute it and/or
15128 + * modify it under the terms of the GNU General Public License
15129 + * as published by the Free Software Foundation; either version
15130 + * 2 of the License, or (at your option) any later version.
15131 + */
15132 +
15133 +#include <linux/sched.h>
15134 +#include <linux/slab.h>
15135 +#include <linux/module.h>
15136 +#include <rxrpc/rxrpc.h>
15137 +#include <rxrpc/transport.h>
15138 +#include <rxrpc/peer.h>
15139 +#include <rxrpc/connection.h>
15140 +#include <rxrpc/call.h>
15141 +#include <rxrpc/message.h>
15142 +#include <linux/udp.h>
15143 +#include <linux/ip.h>
15144 +#include <net/sock.h>
15145 +#include <asm/uaccess.h>
15146 +#include <asm/div64.h>
15147 +#include "internal.h"
15148 +
15149 +__RXACCT_DECL(atomic_t rxrpc_peer_count);
15150 +LIST_HEAD(rxrpc_peers);
15151 +DECLARE_RWSEM(rxrpc_peers_sem);
15152 +
15153 +static void __rxrpc_peer_timeout(rxrpc_timer_t *timer)
15154 +{
15155 +       struct rxrpc_peer *peer = list_entry(timer,struct rxrpc_peer,timeout);
15156 +
15157 +       _debug("Rx PEER TIMEOUT [%p{u=%d}]",peer,atomic_read(&peer->usage));
15158 +
15159 +       rxrpc_peer_do_timeout(peer);
15160 +}
15161 +
15162 +static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = {
15163 +       timed_out:      __rxrpc_peer_timeout,
15164 +};
15165 +
15166 +/*****************************************************************************/
15167 +/*
15168 + * create a peer record
15169 + */
15170 +static int __rxrpc_create_peer(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer **_peer)
15171 +{
15172 +       struct rxrpc_peer *peer;
15173 +
15174 +       _enter("%p,%08x",trans,ntohl(addr));
15175 +
15176 +       /* allocate and initialise a peer record */
15177 +       peer = kmalloc(sizeof(struct rxrpc_peer),GFP_KERNEL);
15178 +       if (!peer) {
15179 +               _leave(" = -ENOMEM");
15180 +               return -ENOMEM;
15181 +       }
15182 +
15183 +       memset(peer,0,sizeof(struct rxrpc_peer));
15184 +       atomic_set(&peer->usage,1);
15185 +
15186 +       INIT_LIST_HEAD(&peer->link);
15187 +       INIT_LIST_HEAD(&peer->proc_link);
15188 +       INIT_LIST_HEAD(&peer->conn_active);
15189 +       INIT_LIST_HEAD(&peer->conn_graveyard);
15190 +       spin_lock_init(&peer->conn_gylock);
15191 +       init_waitqueue_head(&peer->conn_gy_waitq);
15192 +       rwlock_init(&peer->conn_lock);
15193 +       atomic_set(&peer->conn_count,0);
15194 +       spin_lock_init(&peer->lock);
15195 +       rxrpc_timer_init(&peer->timeout,&rxrpc_peer_timer_ops);
15196 +
15197 +       peer->addr.s_addr = addr;
15198 +
15199 +       peer->trans = trans;
15200 +       peer->ops = trans->peer_ops;
15201 +
15202 +       __RXACCT(atomic_inc(&rxrpc_peer_count));
15203 +       *_peer = peer;
15204 +       _leave(" = 0 (%p)",peer);
15205 +
15206 +       return 0;
15207 +} /* end __rxrpc_create_peer() */
15208 +
15209 +/*****************************************************************************/
15210 +/*
15211 + * find a peer record on the specified transport
15212 + * - returns (if successful) with peer record usage incremented
15213 + * - resurrects it from the graveyard if found there
15214 + */
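+/* a minimal usage sketch (illustrative only; 'trans' and 'addr' come from the caller):
+ *
+ *     struct rxrpc_peer *peer;
+ *
+ *     if (rxrpc_peer_lookup(trans,addr,&peer)==0) {
+ *             ... use the peer ...
+ *             rxrpc_put_peer(peer);
+ *     }
+ */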
15215 +int rxrpc_peer_lookup(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer **_peer)
15216 +{
15217 +       struct rxrpc_peer *peer, *candidate = NULL;
15218 +       struct list_head *_p;
15219 +       int ret;
15220 +
15221 +       _enter("%p{%hu},%08x",trans,trans->port,ntohl(addr));
15222 +
15223 +       /* [common case] search the transport's active list first */
15224 +       read_lock(&trans->peer_lock);
15225 +       list_for_each(_p,&trans->peer_active) {
15226 +               peer = list_entry(_p,struct rxrpc_peer,link);
15227 +               if (peer->addr.s_addr==addr)
15228 +                       goto found_active;
15229 +       }
15230 +       read_unlock(&trans->peer_lock);
15231 +
15232 +       /* [uncommon case] not active - create a candidate for a new record */
15233 +       ret = __rxrpc_create_peer(trans,addr,&candidate);
15234 +       if (ret<0) {
15235 +               _leave(" = %d",ret);
15236 +               return ret;
15237 +       }
15238 +
15239 +       /* search the active list again, just in case it appeared whilst we were busy */
15240 +       write_lock(&trans->peer_lock);
15241 +       list_for_each(_p,&trans->peer_active) {
15242 +               peer = list_entry(_p,struct rxrpc_peer,link);
15243 +               if (peer->addr.s_addr==addr)
15244 +                       goto found_active_second_chance;
15245 +       }
15246 +
15247 +       /* search the transport's graveyard list */
15248 +       spin_lock(&trans->peer_gylock);
15249 +       list_for_each(_p,&trans->peer_graveyard) {
15250 +               peer = list_entry(_p,struct rxrpc_peer,link);
15251 +               if (peer->addr.s_addr==addr)
15252 +                       goto found_in_graveyard;
15253 +       }
15254 +       spin_unlock(&trans->peer_gylock);
15255 +
15256 +       /* we can now add the new candidate to the list
15257 +        * - tell the application layer that this peer has been added
15258 +        */
15259 +       rxrpc_get_transport(trans);
15260 +       peer = candidate;
15261 +       candidate = NULL;
15262 +
15263 +       if (peer->ops && peer->ops->adding) {
15264 +               ret = peer->ops->adding(peer);
15265 +               if (ret<0) {
15266 +                       write_unlock(&trans->peer_lock);
15267 +                       __RXACCT(atomic_dec(&rxrpc_peer_count));
15268 +                       kfree(peer);
15269 +                       rxrpc_put_transport(trans);
15270 +                       _leave(" = %d",ret);
15271 +                       return ret;
15272 +               }
15273 +       }
15274 +
15275 +       atomic_inc(&trans->peer_count);
15276 +
15277 + make_active:
15278 +       list_add_tail(&peer->link,&trans->peer_active);
15279 +
15280 + success_uwfree:
15281 +       write_unlock(&trans->peer_lock);
15282 +
15283 +       if (candidate) {
15284 +               __RXACCT(atomic_dec(&rxrpc_peer_count));
15285 +               kfree(candidate);
15286 +       }
15287 +
15288 +       if (list_empty(&peer->proc_link)) {
15289 +               down_write(&rxrpc_peers_sem);
15290 +               list_add_tail(&peer->proc_link,&rxrpc_peers);
15291 +               up_write(&rxrpc_peers_sem);
15292 +       }
15293 +
15294 + success:
15295 +       *_peer = peer;
15296 +
15297 +       _leave(" = 0 (%p{u=%d cc=%d})",
15298 +              peer,atomic_read(&peer->usage),atomic_read(&peer->conn_count));
15299 +       return 0;
15300 +
15301 +       /* handle the peer being found in the active list straight off */
15302 + found_active:
15303 +       rxrpc_get_peer(peer);
15304 +       read_unlock(&trans->peer_lock);
15305 +       goto success;
15306 +
15307 +       /* handle resurrecting a peer from the graveyard */
15308 + found_in_graveyard:
15309 +       rxrpc_get_peer(peer);
15310 +       rxrpc_get_transport(peer->trans);
15311 +       rxrpc_krxtimod_del_timer(&peer->timeout);
15312 +       list_del_init(&peer->link);
15313 +       spin_unlock(&trans->peer_gylock);
15314 +       goto make_active;
15315 +
15316 +       /* handle finding the peer on the second time through the active list */
15317 + found_active_second_chance:
15318 +       rxrpc_get_peer(peer);
15319 +       goto success_uwfree;
15320 +
15321 +} /* end rxrpc_peer_lookup() */
15322 +
15323 +/*****************************************************************************/
15324 +/*
15325 + * finish with a peer record
15326 + * - it gets sent to the graveyard from where it can be resurrected or timed out
15327 + */
15328 +void rxrpc_put_peer(struct rxrpc_peer *peer)
15329 +{
15330 +       struct rxrpc_transport *trans = peer->trans;
15331 +
15332 +       _enter("%p{cc=%d a=%08x}",peer,atomic_read(&peer->conn_count),ntohl(peer->addr.s_addr));
15333 +
15334 +       /* sanity check */
15335 +       if (atomic_read(&peer->usage)<=0)
15336 +               BUG();
15337 +
15338 +       write_lock(&trans->peer_lock);
15339 +       spin_lock(&trans->peer_gylock);
15340 +       if (likely(!atomic_dec_and_test(&peer->usage))) {
15341 +               spin_unlock(&trans->peer_gylock);
15342 +               write_unlock(&trans->peer_lock);
15343 +               _leave("");
15344 +               return;
15345 +       }
15346 +
15347 +       /* move to graveyard queue */
15348 +       list_del(&peer->link);
15349 +       write_unlock(&trans->peer_lock);
15350 +
15351 +       list_add_tail(&peer->link,&trans->peer_graveyard);
15352 +
15353 +       if (!list_empty(&peer->conn_active)) BUG();
15354 +
15355 +       /* discard in 100 secs */
15356 +       rxrpc_krxtimod_add_timer(&peer->timeout,100*HZ);
15357 +
15358 +       spin_unlock(&trans->peer_gylock);
15359 +
15360 +       rxrpc_put_transport(trans);
15361 +
15362 +       _leave(" [killed]");
15363 +} /* end rxrpc_put_peer() */
15364 +
15365 +/*****************************************************************************/
15366 +/*
15367 + * handle a peer timing out in the graveyard
15368 + * - called from krxtimod
15369 + */
15370 +void rxrpc_peer_do_timeout(struct rxrpc_peer *peer)
15371 +{
15372 +       struct rxrpc_transport *trans = peer->trans;
15373 +
15374 +       _enter("%p{u=%d cc=%d a=%08x}",
15375 +              peer,atomic_read(&peer->usage),atomic_read(&peer->conn_count),
15376 +              ntohl(peer->addr.s_addr));
15377 +
15378 +       if (atomic_read(&peer->usage)<0)
15379 +               BUG();
15380 +
15381 +       /* remove from graveyard if still dead */
15382 +       spin_lock(&trans->peer_gylock);
15383 +       if (atomic_read(&peer->usage)==0)
15384 +               list_del_init(&peer->link);
15385 +       else
15386 +               peer = NULL;
15387 +       spin_unlock(&trans->peer_gylock);
15388 +
15389 +       if (!peer) {
15390 +               _leave("");
15391 +               return; /* resurrected */
15392 +       }
15393 +
15394 +       /* clear all connections on this peer */
15395 +       rxrpc_conn_clearall(peer);
15396 +
15397 +       if (!list_empty(&peer->conn_active)) BUG();
15398 +       if (!list_empty(&peer->conn_graveyard)) BUG();
15399 +
15400 +       /* inform the application layer */
15401 +       if (peer->ops && peer->ops->discarding)
15402 +               peer->ops->discarding(peer);
15403 +
15404 +       if (!list_empty(&peer->proc_link)) {
15405 +               down_write(&rxrpc_peers_sem);
15406 +               list_del(&peer->proc_link);
15407 +               up_write(&rxrpc_peers_sem);
15408 +       }
15409 +
15410 +       __RXACCT(atomic_dec(&rxrpc_peer_count));
15411 +       kfree(peer);
15412 +
15413 +       /* if the graveyard is now empty, wake up anyone waiting for that */
15414 +       if (atomic_dec_and_test(&trans->peer_count))
15415 +               wake_up(&trans->peer_gy_waitq);
15416 +
15417 +       _leave(" [destroyed]");
15418 +} /* end rxrpc_peer_do_timeout() */
15419 +
15420 +/*****************************************************************************/
15421 +/*
15422 + * clear all peer records from a transport endpoint
15423 + */
15424 +void rxrpc_peer_clearall(struct rxrpc_transport *trans)
15425 +{
15426 +       DECLARE_WAITQUEUE(myself,current);
15427 +
15428 +       struct rxrpc_peer *peer;
15429 +       int err;
15430 +
15431 +       _enter("%p",trans);
15432 +
15433 +       /* there shouldn't be any active peers remaining */
15434 +       if (!list_empty(&trans->peer_active))
15435 +               BUG();
15436 +
15437 +       /* manually timeout all peers in the graveyard */
15438 +       spin_lock(&trans->peer_gylock);
15439 +       while (!list_empty(&trans->peer_graveyard)) {
15440 +               peer = list_entry(trans->peer_graveyard.next,struct rxrpc_peer,link);
15441 +               _debug("Clearing peer %p\n",peer);
15442 +               err = rxrpc_krxtimod_del_timer(&peer->timeout);
15443 +               spin_unlock(&trans->peer_gylock);
15444 +
15445 +               if (err==0)
15446 +                       rxrpc_peer_do_timeout(peer);
15447 +
15448 +               spin_lock(&trans->peer_gylock);
15449 +       }
15450 +       spin_unlock(&trans->peer_gylock);
15451 +
15452 +       /* wait for the peer graveyard to be completely cleared */
15453 +       set_current_state(TASK_UNINTERRUPTIBLE);
15454 +       add_wait_queue(&trans->peer_gy_waitq,&myself);
15455 +
15456 +       while (atomic_read(&trans->peer_count)!=0) {
15457 +               schedule();
15458 +               set_current_state(TASK_UNINTERRUPTIBLE);
15459 +       }
15460 +
15461 +       remove_wait_queue(&trans->peer_gy_waitq,&myself);
15462 +       set_current_state(TASK_RUNNING);
15463 +
15464 +       _leave("");
15465 +
15466 +} /* end rxrpc_peer_clearall() */
15467 +
15468 +/*****************************************************************************/
15469 +/*
15470 + * calculate and cache the Round-Trip-Time for a message and its response
15471 + */
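+/* the cached RTT is a simple mean, in microseconds, over the samples currently in the
+ * RTT cache (at most RXRPC_RTT_CACHE_SIZE of them)
+ */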
15472 +void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
15473 +                             struct rxrpc_message *msg,
15474 +                             struct rxrpc_message *resp)
15475 +{
15476 +       unsigned long long rtt;
15477 +       int loop;
15478 +
15479 +       _enter("%p,%p,%p",peer,msg,resp);
15480 +
15481 +       /* calculate the latest RTT */
15482 +       rtt = resp->stamp.tv_sec - msg->stamp.tv_sec;
15483 +       rtt *= 1000000UL;
15484 +       rtt += resp->stamp.tv_usec - msg->stamp.tv_usec;
15485 +
15486 +       /* add to cache */
15487 +       peer->rtt_cache[peer->rtt_point] = rtt;
15488 +       peer->rtt_point++;
15489 +       peer->rtt_point %= RXRPC_RTT_CACHE_SIZE;
15490 +
15491 +       if (peer->rtt_usage<RXRPC_RTT_CACHE_SIZE) peer->rtt_usage++;
15492 +
15493 +       /* recalculate RTT as the average of the cached samples */
15494 +       for (rtt=0, loop=peer->rtt_usage-1; loop>=0; loop--)
15495 +               rtt += peer->rtt_cache[loop];
15496 +       do_div(rtt,peer->rtt_usage);    /* do_div() divides rtt in place and returns the remainder */
15497 +       peer->rtt = rtt;
15498 +
15499 +       _leave(" RTT=%lu.%lums",peer->rtt/1000,peer->rtt%1000);
15500 +
15501 +} /* end rxrpc_peer_calculate_rtt() */
15502 diff -urNp linux-5240/net/rxrpc/proc.c linux-5250/net/rxrpc/proc.c
15503 --- linux-5240/net/rxrpc/proc.c 1970-01-01 01:00:00.000000000 +0100
15504 +++ linux-5250/net/rxrpc/proc.c 
15505 @@ -0,0 +1,612 @@
15506 +/* proc.c: /proc interface for RxRPC
15507 + *
15508 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
15509 + * Written by David Howells (dhowells@redhat.com)
15510 + *
15511 + * This program is free software; you can redistribute it and/or
15512 + * modify it under the terms of the GNU General Public License
15513 + * as published by the Free Software Foundation; either version
15514 + * 2 of the License, or (at your option) any later version.
15515 + */
15516 +
15517 +#include <linux/sched.h>
15518 +#include <linux/slab.h>
15519 +#include <linux/module.h>
15520 +#include <linux/proc_fs.h>
15521 +#include <linux/seq_file.h>
15522 +#include <rxrpc/rxrpc.h>
15523 +#include <rxrpc/transport.h>
15524 +#include <rxrpc/peer.h>
15525 +#include <rxrpc/connection.h>
15526 +#include <rxrpc/call.h>
15527 +#include <rxrpc/message.h>
15528 +#include "internal.h"
15529 +
15530 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
15531 +static inline struct proc_dir_entry *PDE(const struct inode *inode)
15532 +{
15533 +       return (struct proc_dir_entry *)inode->u.generic_ip;
15534 +}
15535 +#endif
15536 +
15537 +static struct proc_dir_entry *proc_rxrpc;
15538 +
15539 +static int rxrpc_proc_transports_open(struct inode *inode, struct file *file);
15540 +static void *rxrpc_proc_transports_start(struct seq_file *p, loff_t *pos);
15541 +static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos);
15542 +static void rxrpc_proc_transports_stop(struct seq_file *p, void *v);
15543 +static int rxrpc_proc_transports_show(struct seq_file *m, void *v);
15544 +
15545 +static struct seq_operations rxrpc_proc_transports_ops = {
15546 +       start:  rxrpc_proc_transports_start,
15547 +       next:   rxrpc_proc_transports_next,
15548 +       stop:   rxrpc_proc_transports_stop,
15549 +       show:   rxrpc_proc_transports_show,
15550 +};
15551 +
15552 +static struct file_operations rxrpc_proc_transports_fops = {
15553 +       open:           rxrpc_proc_transports_open,
15554 +       read:           seq_read,
15555 +       llseek:         seq_lseek,
15556 +       release:        seq_release,
15557 +};
15558 +
15559 +static int rxrpc_proc_peers_open(struct inode *inode, struct file *file);
15560 +static void *rxrpc_proc_peers_start(struct seq_file *p, loff_t *pos);
15561 +static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos);
15562 +static void rxrpc_proc_peers_stop(struct seq_file *p, void *v);
15563 +static int rxrpc_proc_peers_show(struct seq_file *m, void *v);
15564 +
15565 +static struct seq_operations rxrpc_proc_peers_ops = {
15566 +       start:  rxrpc_proc_peers_start,
15567 +       next:   rxrpc_proc_peers_next,
15568 +       stop:   rxrpc_proc_peers_stop,
15569 +       show:   rxrpc_proc_peers_show,
15570 +};
15571 +
15572 +static struct file_operations rxrpc_proc_peers_fops = {
15573 +       open:           rxrpc_proc_peers_open,
15574 +       read:           seq_read,
15575 +       llseek:         seq_lseek,
15576 +       release:        seq_release,
15577 +};
15578 +
15579 +static int rxrpc_proc_conns_open(struct inode *inode, struct file *file);
15580 +static void *rxrpc_proc_conns_start(struct seq_file *p, loff_t *pos);
15581 +static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos);
15582 +static void rxrpc_proc_conns_stop(struct seq_file *p, void *v);
15583 +static int rxrpc_proc_conns_show(struct seq_file *m, void *v);
15584 +
15585 +static struct seq_operations rxrpc_proc_conns_ops = {
15586 +       start:  rxrpc_proc_conns_start,
15587 +       next:   rxrpc_proc_conns_next,
15588 +       stop:   rxrpc_proc_conns_stop,
15589 +       show:   rxrpc_proc_conns_show,
15590 +};
15591 +
15592 +static struct file_operations rxrpc_proc_conns_fops = {
15593 +       open:           rxrpc_proc_conns_open,
15594 +       read:           seq_read,
15595 +       llseek:         seq_lseek,
15596 +       release:        seq_release,
15597 +};
15598 +
15599 +static int rxrpc_proc_calls_open(struct inode *inode, struct file *file);
15600 +static void *rxrpc_proc_calls_start(struct seq_file *p, loff_t *pos);
15601 +static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos);
15602 +static void rxrpc_proc_calls_stop(struct seq_file *p, void *v);
15603 +static int rxrpc_proc_calls_show(struct seq_file *m, void *v);
15604 +
15605 +static struct seq_operations rxrpc_proc_calls_ops = {
15606 +       start:  rxrpc_proc_calls_start,
15607 +       next:   rxrpc_proc_calls_next,
15608 +       stop:   rxrpc_proc_calls_stop,
15609 +       show:   rxrpc_proc_calls_show,
15610 +};
15611 +
15612 +static struct file_operations rxrpc_proc_calls_fops = {
15613 +       open:           rxrpc_proc_calls_open,
15614 +       read:           seq_read,
15615 +       llseek:         seq_lseek,
15616 +       release:        seq_release,
15617 +};
15618 +
15619 +static const char *rxrpc_call_states7[] = {
15620 +       "complet",
15621 +       "error  ",
15622 +       "rcv_op ",
15623 +       "rcv_arg",
15624 +       "got_arg",
15625 +       "snd_rpl",
15626 +       "fin_ack",
15627 +       "snd_arg",
15628 +       "rcv_rpl",
15629 +       "got_rpl"
15630 +};
15631 +
15632 +static const char *rxrpc_call_error_states7[] = {
15633 +       "no_err ",
15634 +       "loc_abt",
15635 +       "rmt_abt",
15636 +       "loc_err",
15637 +       "rmt_err"
15638 +};
15639 +
15640 +/*****************************************************************************/
15641 +/*
15642 + * initialise the /proc/net/rxrpc/ directory
15643 + */
15644 +int rxrpc_proc_init(void)
15645 +{
15646 +       struct proc_dir_entry *p;
15647 +
15648 +       proc_rxrpc = proc_mkdir("rxrpc",proc_net);
15649 +       if (!proc_rxrpc)
15650 +               goto error;
15651 +       proc_rxrpc->owner = THIS_MODULE;
15652 +
15653 +       p = create_proc_entry("calls",0,proc_rxrpc);
15654 +       if (!p)
15655 +               goto error_proc;
15656 +       p->proc_fops = &rxrpc_proc_calls_fops;
15657 +       p->owner = THIS_MODULE;
15658 +
15659 +       p = create_proc_entry("connections",0,proc_rxrpc);
15660 +       if (!p)
15661 +               goto error_calls;
15662 +       p->proc_fops = &rxrpc_proc_conns_fops;
15663 +       p->owner = THIS_MODULE;
15664 +
15665 +       p = create_proc_entry("peers",0,proc_rxrpc);
15666 +       if (!p)
15667 +               goto error_conns;
15668 +       p->proc_fops = &rxrpc_proc_peers_fops;
15669 +       p->owner = THIS_MODULE;
15670 +
15671 +       p = create_proc_entry("transports",0,proc_rxrpc);
15672 +       if (!p)
15673 +               goto error_peers;
15674 +       p->proc_fops = &rxrpc_proc_transports_fops;
15675 +       p->owner = THIS_MODULE;
15676 +
15677 +       return 0;
15678 +
+ error_peers:
+       remove_proc_entry("peers",proc_rxrpc);
15679 + error_conns:
15680 +       remove_proc_entry("connections",proc_rxrpc);
15681 + error_calls:
15682 +       remove_proc_entry("calls",proc_rxrpc);
15683 + error_proc:
15684 +       remove_proc_entry("rxrpc",proc_net);
15685 + error:
15686 +       return -ENOMEM;
15687 +} /* end rxrpc_proc_init() */
15688 +
15689 +/*****************************************************************************/
15690 +/*
15691 + * clean up the /proc/net/rxrpc/ directory
15692 + */
15693 +void rxrpc_proc_cleanup(void)
15694 +{
15695 +       remove_proc_entry("transports",proc_rxrpc);
15696 +       remove_proc_entry("peers",proc_rxrpc);
15697 +       remove_proc_entry("connections",proc_rxrpc);
15698 +       remove_proc_entry("calls",proc_rxrpc);
15699 +
15700 +       remove_proc_entry("rxrpc",proc_net);
15701 +
15702 +} /* end rxrpc_proc_cleanup() */
15703 +
15704 +/*****************************************************************************/
15705 +/*
15706 + * open "/proc/net/rxrpc/transports" which provides a summary of extant transports
15707 + */
15708 +static int rxrpc_proc_transports_open(struct inode *inode, struct file *file)
15709 +{
15710 +       struct seq_file *m;
15711 +       int ret;
15712 +
15713 +       ret = seq_open(file,&rxrpc_proc_transports_ops);
15714 +       if (ret<0)
15715 +               return ret;
15716 +
15717 +       m = file->private_data;
15718 +       m->private = PDE(inode)->data;
15719 +
15720 +       return 0;
15721 +} /* end rxrpc_proc_transports_open() */
15722 +
15723 +/*****************************************************************************/
15724 +/*
15725 + * set up the iterator to start reading from the transports list and return the first item
15726 + */
15727 +static void *rxrpc_proc_transports_start(struct seq_file *m, loff_t *_pos)
15728 +{
15729 +       struct list_head *_p;
15730 +       loff_t pos = *_pos;
15731 +
15732 +       /* lock the list against modification */
15733 +       down_read(&rxrpc_proc_transports_sem);
15734 +
15735 +       /* allow for the header line */
15736 +       if (!pos)
15737 +               return (void *)1;
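+       /* (void *) 1 is a sentinel meaning "emit the header row"; the _show routine tests for it */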
15738 +       pos--;
15739 +
15740 +       /* find the n'th element in the list */
15741 +       list_for_each(_p,&rxrpc_proc_transports)
15742 +               if (!pos--)
15743 +                       break;
15744 +
15745 +       return _p!=&rxrpc_proc_transports ? _p : NULL;
15746 +} /* end rxrpc_proc_transports_start() */
15747 +
15748 +/*****************************************************************************/
15749 +/*
15750 + * move to next call in transports list
15751 + */
15752 +static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos)
15753 +{
15754 +       struct list_head *_p;
15755 +
15756 +       (*pos)++;
15757 +
15758 +       _p = v;
15759 +       _p = v==(void*)1 ? rxrpc_proc_transports.next : _p->next;
15760 +
15761 +       return _p!=&rxrpc_proc_transports ? _p : NULL;
15762 +} /* end rxrpc_proc_transports_next() */
15763 +
15764 +/*****************************************************************************/
15765 +/*
15766 + * clean up after reading from the transports list
15767 + */
15768 +static void rxrpc_proc_transports_stop(struct seq_file *p, void *v)
15769 +{
15770 +       up_read(&rxrpc_proc_transports_sem);
15771 +
15772 +} /* end rxrpc_proc_transports_stop() */
15773 +
15774 +/*****************************************************************************/
15775 +/*
15776 + * display a header line followed by a load of call lines
15777 + */
15778 +static int rxrpc_proc_transports_show(struct seq_file *m, void *v)
15779 +{
15780 +       struct rxrpc_transport *trans = list_entry(v,struct rxrpc_transport,proc_link);
15781 +
15782 +       /* display header on line 1 */
15783 +       if (v == (void *)1) {
15784 +               seq_puts(m, "LOCAL USE\n");
15785 +               return 0;
15786 +       }
15787 +
15788 +       /* display one transport per line on subsequent lines */
15789 +       seq_printf(m,"%5hu %3d\n",
15790 +                  trans->port,
15791 +                  atomic_read(&trans->usage)
15792 +                  );
15793 +
15794 +       return 0;
15795 +} /* end rxrpc_proc_transports_show() */
15796 +
15797 +/*****************************************************************************/
15798 +/*
15799 + * open "/proc/net/rxrpc/peers" which provides a summary of extant peers
15800 + */
15801 +static int rxrpc_proc_peers_open(struct inode *inode, struct file *file)
15802 +{
15803 +       struct seq_file *m;
15804 +       int ret;
15805 +
15806 +       ret = seq_open(file,&rxrpc_proc_peers_ops);
15807 +       if (ret<0)
15808 +               return ret;
15809 +
15810 +       m = file->private_data;
15811 +       m->private = PDE(inode)->data;
15812 +
15813 +       return 0;
15814 +} /* end rxrpc_proc_peers_open() */
15815 +
15816 +/*****************************************************************************/
15817 +/*
15818 + * set up the iterator to start reading from the peers list and return the first item
15819 + */
15820 +static void *rxrpc_proc_peers_start(struct seq_file *m, loff_t *_pos)
15821 +{
15822 +       struct list_head *_p;
15823 +       loff_t pos = *_pos;
15824 +
15825 +       /* lock the list against modification */
15826 +       down_read(&rxrpc_peers_sem);
15827 +
15828 +       /* allow for the header line */
15829 +       if (!pos)
15830 +               return (void *)1;
15831 +       pos--;
15832 +
15833 +       /* find the n'th element in the list */
15834 +       list_for_each(_p,&rxrpc_peers)
15835 +               if (!pos--)
15836 +                       break;
15837 +
15838 +       return _p!=&rxrpc_peers ? _p : NULL;
15839 +} /* end rxrpc_proc_peers_start() */
15840 +
15841 +/*****************************************************************************/
15842 +/*
15843 + * move to next conn in peers list
15844 + */
15845 +static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos)
15846 +{
15847 +       struct list_head *_p;
15848 +
15849 +       (*pos)++;
15850 +
15851 +       _p = v;
15852 +       _p = v==(void*)1 ? rxrpc_peers.next : _p->next;
15853 +
15854 +       return _p!=&rxrpc_peers ? _p : NULL;
15855 +} /* end rxrpc_proc_peers_next() */
15856 +
15857 +/*****************************************************************************/
15858 +/*
15859 + * clean up after reading from the peers list
15860 + */
15861 +static void rxrpc_proc_peers_stop(struct seq_file *p, void *v)
15862 +{
15863 +       up_read(&rxrpc_peers_sem);
15864 +
15865 +} /* end rxrpc_proc_peers_stop() */
15866 +
15867 +/*****************************************************************************/
15868 +/*
15869 + * display a header line followed by a load of conn lines
15870 + */
15871 +static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
15872 +{
15873 +       struct rxrpc_peer *peer = list_entry(v,struct rxrpc_peer,proc_link);
15874 +       signed long timeout;
15875 +
15876 +       /* display header on line 1 */
15877 +       if (v == (void *)1) {
15878 +               seq_puts(m,"LOCAL REMOTE   USAGE CONNS  TIMEOUT   MTU RTT(uS)\n");
15879 +               return 0;
15880 +       }
15881 +
15882 +       /* display one peer per line on subsequent lines */
15883 +       timeout = 0;
15884 +       if (!list_empty(&peer->timeout.link))
15885 +               timeout = (signed long)peer->timeout.timo_jif - (signed long)jiffies;
15886 +
15887 +       seq_printf(m,"%5hu %08x %5d %5d %8ld %5u %7lu\n",
15888 +                  peer->trans->port,
15889 +                  ntohl(peer->addr.s_addr),
15890 +                  atomic_read(&peer->usage),
15891 +                  atomic_read(&peer->conn_count),
15892 +                  timeout,
15893 +                  peer->if_mtu,
15894 +                  peer->rtt
15895 +                  );
15896 +
15897 +       return 0;
15898 +} /* end rxrpc_proc_peers_show() */
15899 +
15900 +/*****************************************************************************/
15901 +/*
15902 + * open "/proc/net/rxrpc/connections" which provides a summary of extant connections
15903 + */
15904 +static int rxrpc_proc_conns_open(struct inode *inode, struct file *file)
15905 +{
15906 +       struct seq_file *m;
15907 +       int ret;
15908 +
15909 +       ret = seq_open(file,&rxrpc_proc_conns_ops);
15910 +       if (ret<0)
15911 +               return ret;
15912 +
15913 +       m = file->private_data;
15914 +       m->private = PDE(inode)->data;
15915 +
15916 +       return 0;
15917 +} /* end rxrpc_proc_conns_open() */
15918 +
15919 +/*****************************************************************************/
15920 +/*
15921 + * set up the iterator to start reading from the conns list and return the first item
15922 + */
15923 +static void *rxrpc_proc_conns_start(struct seq_file *m, loff_t *_pos)
15924 +{
15925 +       struct list_head *_p;
15926 +       loff_t pos = *_pos;
15927 +
15928 +       /* lock the list against modification */
15929 +       down_read(&rxrpc_conns_sem);
15930 +
15931 +       /* allow for the header line */
15932 +       if (!pos)
15933 +               return (void *)1;
15934 +       pos--;
15935 +
15936 +       /* find the n'th element in the list */
15937 +       list_for_each(_p,&rxrpc_conns)
15938 +               if (!pos--)
15939 +                       break;
15940 +
15941 +       return _p!=&rxrpc_conns ? _p : NULL;
15942 +} /* end rxrpc_proc_conns_start() */
15943 +
15944 +/*****************************************************************************/
15945 +/*
15946 + * move to next conn in conns list
15947 + */
15948 +static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos)
15949 +{
15950 +       struct list_head *_p;
15951 +
15952 +       (*pos)++;
15953 +
15954 +       _p = v;
15955 +       _p = v==(void*)1 ? rxrpc_conns.next : _p->next;
15956 +
15957 +       return _p!=&rxrpc_conns ? _p : NULL;
15958 +} /* end rxrpc_proc_conns_next() */
15959 +
15960 +/*****************************************************************************/
15961 +/*
15962 + * clean up after reading from the conns list
15963 + */
15964 +static void rxrpc_proc_conns_stop(struct seq_file *p, void *v)
15965 +{
15966 +       up_read(&rxrpc_conns_sem);
15967 +
15968 +} /* end rxrpc_proc_conns_stop() */
15969 +
15970 +/*****************************************************************************/
15971 +/*
15972 + * display a header line followed by a load of conn lines
15973 + */
15974 +static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
15975 +{
15976 +       struct rxrpc_connection *conn = list_entry(v,struct rxrpc_connection,proc_link);
15977 +       signed long timeout;
15978 +
15979 +       /* display header on line 1 */
15980 +       if (v == (void *)1) {
15981 +               seq_puts(m,
15982 +                        "LOCAL REMOTE   RPORT SRVC CONN     END SERIALNO CALLNO     MTU  TIMEOUT"
15983 +                        "\n");
15984 +               return 0;
15985 +       }
15986 +
15987 +       /* display one conn per line on subsequent lines */
15988 +       timeout = 0;
15989 +       if (!list_empty(&conn->timeout.link))
15990 +               timeout = (signed long)conn->timeout.timo_jif - (signed long)jiffies;
15991 +
15992 +       seq_printf(m,"%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5u %8ld\n",
15993 +                  conn->trans->port,
15994 +                  ntohl(conn->addr.sin_addr.s_addr),
15995 +                  ntohs(conn->addr.sin_port),
15996 +                  ntohs(conn->service_id),
15997 +                  ntohl(conn->conn_id),
15998 +                  conn->out_clientflag ? "CLT" : "SRV",
15999 +                  conn->serial_counter,
16000 +                  conn->call_counter,
16001 +                  conn->mtu_size,
16002 +                  timeout
16003 +                  );
16004 +
16005 +       return 0;
16006 +} /* end rxrpc_proc_conns_show() */
16007 +
16008 +/*****************************************************************************/
16009 +/*
16010 + * open "/proc/net/rxrpc/calls" which provides a summary of extant calls
16011 + */
16012 +static int rxrpc_proc_calls_open(struct inode *inode, struct file *file)
16013 +{
16014 +       struct seq_file *m;
16015 +       int ret;
16016 +
16017 +       ret = seq_open(file,&rxrpc_proc_calls_ops);
16018 +       if (ret<0)
16019 +               return ret;
16020 +
16021 +       m = file->private_data;
16022 +       m->private = PDE(inode)->data;
16023 +
16024 +       return 0;
16025 +} /* end rxrpc_proc_calls_open() */
16026 +
16027 +/*****************************************************************************/
16028 +/*
16029 + * set up the iterator to start reading from the calls list and return the first item
16030 + */
16031 +static void *rxrpc_proc_calls_start(struct seq_file *m, loff_t *_pos)
16032 +{
16033 +       struct list_head *_p;
16034 +       loff_t pos = *_pos;
16035 +
16036 +       /* lock the list against modification */
16037 +       down_read(&rxrpc_calls_sem);
16038 +
16039 +       /* allow for the header line */
16040 +       if (!pos)
16041 +               return (void *)1;
16042 +       pos--;
16043 +
16044 +       /* find the n'th element in the list */
16045 +       list_for_each(_p,&rxrpc_calls)
16046 +               if (!pos--)
16047 +                       break;
16048 +
16049 +       return _p!=&rxrpc_calls ? _p : NULL;
16050 +} /* end rxrpc_proc_calls_start() */
16051 +
16052 +/*****************************************************************************/
16053 +/*
16054 + * move to next call in calls list
16055 + */
16056 +static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos)
16057 +{
16058 +       struct list_head *_p;
16059 +
16060 +       (*pos)++;
16061 +
16062 +       _p = v;
16063 +       _p = v==(void*)1 ? rxrpc_calls.next : _p->next;
16064 +
16065 +       return _p!=&rxrpc_calls ? _p : NULL;
16066 +} /* end rxrpc_proc_calls_next() */
16067 +
16068 +/*****************************************************************************/
16069 +/*
16070 + * clean up after reading from the calls list
16071 + */
16072 +static void rxrpc_proc_calls_stop(struct seq_file *p, void *v)
16073 +{
16074 +       up_read(&rxrpc_calls_sem);
16075 +
16076 +} /* end rxrpc_proc_calls_stop() */
16077 +
16078 +/*****************************************************************************/
16079 +/*
16080 + * display a header line followed by a load of call lines
16081 + */
16082 +static int rxrpc_proc_calls_show(struct seq_file *m, void *v)
16083 +{
16084 +       struct rxrpc_call *call = list_entry(v,struct rxrpc_call,call_link);
16085 +
16086 +       /* display header on line 1 */
16087 +       if (v == (void *)1) {
16088 +               seq_puts(m,
16089 +                        "LOCAL REMOT SRVC CONN     CALL     DIR USE "
16090 +                        " L STATE   OPCODE ABORT    ERRNO\n"
16091 +                        );
16092 +               return 0;
16093 +       }
16094 +
16095 +       /* display one call per line on subsequent lines */
16096 +       seq_printf(m,
16097 +                  "%5hu %5hu %04hx %08x %08x %s %3u%c"
16098 +                  " %c %-7.7s %6d %08x %5d\n",
16099 +                  call->conn->trans->port,
16100 +                  ntohs(call->conn->addr.sin_port),
16101 +                  ntohs(call->conn->service_id),
16102 +                  ntohl(call->conn->conn_id),
16103 +                  ntohl(call->call_id),
16104 +                  call->conn->service ? "SVC" : "CLT",
16105 +                  atomic_read(&call->usage),
16106 +                  waitqueue_active(&call->waitq) ? 'w' : ' ',
16107 +                  call->app_last_rcv ? 'Y' : '-',
16108 +                  (call->app_call_state!=RXRPC_CSTATE_ERROR ?
16109 +                   rxrpc_call_states7[call->app_call_state] :
16110 +                   rxrpc_call_error_states7[call->app_err_state]),
16111 +                  call->app_opcode,
16112 +                  call->app_abort_code,
16113 +                  call->app_errno
16114 +                  );
16115 +
16116 +       return 0;
16117 +} /* end rxrpc_proc_calls_show() */
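The four /proc/net/rxrpc files implemented above all follow the kernel's seq_file iterator pattern: the open routine calls seq_open() with a seq_operations table, start() takes the list semaphore and returns either the magic header token (void *) 1 or the pos'th list element, next() steps along the list, show() formats one line, and stop() drops the semaphore again. A minimal sketch of that shape, using hypothetical my_* names rather than anything defined in this patch:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/seq_file.h>

static LIST_HEAD(my_list);              /* hypothetical list of records */
static DECLARE_RWSEM(my_list_sem);      /* guards my_list against modification */

static void *my_seq_start(struct seq_file *m, loff_t *_pos)
{
        struct list_head *_p;
        loff_t pos = *_pos;

        down_read(&my_list_sem);        /* pin the list for the whole read */

        if (!pos)
                return (void *) 1;      /* slot 0 produces the header line */
        pos--;

        list_for_each(_p, &my_list)     /* find the pos'th element */
                if (!pos--)
                        break;

        return _p != &my_list ? _p : NULL;
}

static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct list_head *_p = v;

        (*pos)++;
        _p = (v == (void *) 1) ? my_list.next : _p->next;

        return _p != &my_list ? _p : NULL;
}

static void my_seq_stop(struct seq_file *m, void *v)
{
        up_read(&my_list_sem);
}

static int my_seq_show(struct seq_file *m, void *v)
{
        if (v == (void *) 1) {
                seq_puts(m, "HEADER\n");
                return 0;
        }
        /* otherwise format one record per line with seq_printf() */
        return 0;
}

static struct seq_operations my_seq_ops = {
        .start  = my_seq_start,
        .next   = my_seq_next,
        .stop   = my_seq_stop,
        .show   = my_seq_show,
};

static int my_proc_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &my_seq_ops);
}

static struct file_operations my_proc_fops = {
        .open           = my_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

Wiring my_proc_fops into create_proc_entry() (as the proc.c code above presumably does in its init routine) is all that remains; the (void *) 1 header convention keeps the header text out of the list walk itself.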
16118 diff -urNp linux-5240/net/rxrpc/rxrpc_syms.c linux-5250/net/rxrpc/rxrpc_syms.c
16119 --- linux-5240/net/rxrpc/rxrpc_syms.c   1970-01-01 01:00:00.000000000 +0100
16120 +++ linux-5250/net/rxrpc/rxrpc_syms.c   
16121 @@ -0,0 +1,51 @@
16122 +/* rxrpc_syms.c: exported Rx RPC layer interface symbols
16123 + *
16124 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
16125 + * Written by David Howells (dhowells@redhat.com)
16126 + *
16127 + * This program is free software; you can redistribute it and/or
16128 + * modify it under the terms of the GNU General Public License
16129 + * as published by the Free Software Foundation; either version
16130 + * 2 of the License, or (at your option) any later version.
16131 + */
16132 +
16133 +#include <linux/config.h>
16134 +#include <linux/module.h>
16135 +
16136 +#include <rxrpc/transport.h>
16137 +#include <rxrpc/connection.h>
16138 +#include <rxrpc/call.h>
16139 +#include <rxrpc/krxiod.h>
16140 +
16141 +/* call.c */
16142 +EXPORT_SYMBOL(rxrpc_call_rcv_timeout);
16143 +EXPORT_SYMBOL(rxrpc_call_acks_timeout);
16144 +EXPORT_SYMBOL(rxrpc_call_dfr_ack_timeout);
16145 +EXPORT_SYMBOL(rxrpc_call_max_resend);
16146 +EXPORT_SYMBOL(rxrpc_call_states);
16147 +EXPORT_SYMBOL(rxrpc_call_error_states);
16148 +
16149 +EXPORT_SYMBOL(rxrpc_create_call);
16150 +EXPORT_SYMBOL(rxrpc_incoming_call);
16151 +EXPORT_SYMBOL(rxrpc_put_call);
16152 +EXPORT_SYMBOL(rxrpc_call_abort);
16153 +EXPORT_SYMBOL(rxrpc_call_read_data);
16154 +EXPORT_SYMBOL(rxrpc_call_write_data);
16155 +EXPORT_SYMBOL(rxrpc_call_flush);
16156 +
16157 +/* connection.c */
16158 +EXPORT_SYMBOL(rxrpc_create_connection);
16159 +EXPORT_SYMBOL(rxrpc_put_connection);
16160 +
16161 +/* sysctl.c */
16162 +EXPORT_SYMBOL(rxrpc_ktrace);
16163 +EXPORT_SYMBOL(rxrpc_kdebug);
16164 +EXPORT_SYMBOL(rxrpc_kproto);
16165 +EXPORT_SYMBOL(rxrpc_knet);
16166 +
16167 +/* transport.c */
16168 +EXPORT_SYMBOL(rxrpc_create_transport);
16169 +EXPORT_SYMBOL(rxrpc_clear_transport);
16170 +EXPORT_SYMBOL(rxrpc_put_transport);
16171 +EXPORT_SYMBOL(rxrpc_add_service);
16172 +EXPORT_SYMBOL(rxrpc_del_service);
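The symbols exported above constitute the in-kernel client interface to the Rx RPC layer (the AFS filesystem elsewhere in this patch is its main user). As a rough sketch of how another module might bring an endpoint up and down with this interface (the port number, my_* names and module boilerplate here are illustrative assumptions, not taken from this patch):

#include <linux/init.h>
#include <linux/module.h>
#include <rxrpc/transport.h>

static struct rxrpc_transport *my_trans;        /* hypothetical endpoint pointer */

static int __init my_rxrpc_example_init(void)
{
        int ret;

        /* bind a transport endpoint to an arbitrary example UDP port */
        ret = rxrpc_create_transport(7001, &my_trans);
        if (ret < 0)
                return ret;

        /* a real caller would now fill in a struct rxrpc_service and hand it
         * to rxrpc_add_service() so that incoming calls bearing its service
         * ID get routed to it */
        return 0;
}

static void __exit my_rxrpc_example_exit(void)
{
        if (my_trans)
                rxrpc_put_transport(my_trans);  /* drop our reference */
}

module_init(my_rxrpc_example_init);
module_exit(my_rxrpc_example_exit);
MODULE_LICENSE("GPL");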
16173 diff -urNp linux-5240/net/rxrpc/sysctl.c linux-5250/net/rxrpc/sysctl.c
16174 --- linux-5240/net/rxrpc/sysctl.c       1970-01-01 01:00:00.000000000 +0100
16175 +++ linux-5250/net/rxrpc/sysctl.c       
16176 @@ -0,0 +1,73 @@
16177 +/* sysctl.c: Rx RPC control
16178 + *
16179 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
16180 + * Written by David Howells (dhowells@redhat.com)
16181 + *
16182 + * This program is free software; you can redistribute it and/or
16183 + * modify it under the terms of the GNU General Public License
16184 + * as published by the Free Software Foundation; either version
16185 + * 2 of the License, or (at your option) any later version.
16186 + */
16187 +
16188 +#include <linux/config.h>
16189 +#include <linux/sched.h>
16190 +#include <linux/slab.h>
16191 +#include <linux/module.h>
16192 +#include <linux/sysctl.h>
16193 +#include <linux/config.h>
16194 +#include <rxrpc/types.h>
16195 +#include <rxrpc/rxrpc.h>
16196 +#include <asm/errno.h>
16197 +#include "internal.h"
16198 +
16199 +int rxrpc_ktrace;
16200 +int rxrpc_kdebug;
16201 +int rxrpc_kproto;
16202 +int rxrpc_knet;
16203 +
16204 +#ifdef CONFIG_SYSCTL
16205 +static struct ctl_table_header *rxrpc_sysctl = NULL;
16206 +
16207 +static ctl_table rxrpc_sysctl_table[] = {
16208 +        { 1, "kdebug", &rxrpc_kdebug, sizeof(int), 0644, NULL, &proc_dointvec },
16209 +        { 2, "ktrace", &rxrpc_ktrace, sizeof(int), 0644, NULL, &proc_dointvec },
16210 +        { 3, "kproto", &rxrpc_kproto, sizeof(int), 0644, NULL, &proc_dointvec },
16211 +        { 4, "knet",   &rxrpc_knet,   sizeof(int), 0644, NULL, &proc_dointvec },
16212 +       { 0 }
16213 +};
16214 +
16215 +static ctl_table rxrpc_dir_sysctl_table[] = {
16216 +       { 1, "rxrpc", NULL, 0, 0555, rxrpc_sysctl_table },
16217 +       { 0 }
16218 +};
16219 +#endif /* CONFIG_SYSCTL */
16220 +
16221 +/*****************************************************************************/
16222 +/*
16223 + * initialise the sysctl stuff for Rx RPC
16224 + */
16225 +int rxrpc_sysctl_init(void)
16226 +{
16227 +#ifdef CONFIG_SYSCTL
16228 +       rxrpc_sysctl = register_sysctl_table(rxrpc_dir_sysctl_table,0);
16229 +       if (!rxrpc_sysctl)
16230 +               return -ENOMEM;
16231 +#endif /* CONFIG_SYSCTL */
16232 +
16233 +       return 0;
16234 +} /* end rxrpc_sysctl_init() */
16235 +
16236 +/*****************************************************************************/
16237 +/*
16238 + * clean up the sysctl stuff for Rx RPC
16239 + */
16240 +void rxrpc_sysctl_cleanup(void)
16241 +{
16242 +#ifdef CONFIG_SYSCTL
16243 +       if (rxrpc_sysctl) {
16244 +               unregister_sysctl_table(rxrpc_sysctl);
16245 +               rxrpc_sysctl = NULL;
16246 +       }
16247 +#endif /* CONFIG_SYSCTL */
16248 +
16249 +} /* end rxrpc_sysctl_cleanup() */
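With CONFIG_SYSCTL enabled, the table registered above exposes the four debugging knobs as /proc/sys/rxrpc/kdebug, ktrace, kproto and knet (following the procname fields), so tracing can presumably be toggled at run time with something like echo 1 > /proc/sys/rxrpc/kdebug; without CONFIG_SYSCTL the integers still exist and are still exported, but can only be set from code.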
16250 diff -urNp linux-5240/net/rxrpc/transport.c linux-5250/net/rxrpc/transport.c
16251 --- linux-5240/net/rxrpc/transport.c    1970-01-01 01:00:00.000000000 +0100
16252 +++ linux-5250/net/rxrpc/transport.c    
16253 @@ -0,0 +1,824 @@
16254 +/* transport.c: Rx Transport routines
16255 + *
16256 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
16257 + * Written by David Howells (dhowells@redhat.com)
16258 + *
16259 + * This program is free software; you can redistribute it and/or
16260 + * modify it under the terms of the GNU General Public License
16261 + * as published by the Free Software Foundation; either version
16262 + * 2 of the License, or (at your option) any later version.
16263 + */
16264 +
16265 +#include <linux/sched.h>
16266 +#include <linux/slab.h>
16267 +#include <linux/module.h>
16268 +#include <rxrpc/transport.h>
16269 +#include <rxrpc/peer.h>
16270 +#include <rxrpc/connection.h>
16271 +#include <rxrpc/call.h>
16272 +#include <rxrpc/message.h>
16273 +#include <rxrpc/krxiod.h>
16274 +#include <rxrpc/krxsecd.h>
16275 +#include <linux/udp.h>
16276 +#include <linux/in.h>
16277 +#include <linux/in6.h>
16278 +#include <linux/icmp.h>
16279 +#include <net/sock.h>
16280 +#include <net/ip.h>
16281 +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
16282 +#include <linux/ipv6.h>        /* this should _really_ be in errqueue.h.. */
16283 +#endif
16284 +#include <linux/errqueue.h>
16285 +#include <asm/uaccess.h>
16286 +#include <asm/checksum.h>
16287 +#include "internal.h"
16288 +
16289 +struct errormsg {
16290 +       struct cmsghdr                  cmsg;           /* control message header */
16291 +       struct sock_extended_err        ee;             /* extended error information */
16292 +       struct sockaddr_in              icmp_src;       /* ICMP packet source address */
16293 +};
16294 +
16295 +static spinlock_t rxrpc_transports_lock = SPIN_LOCK_UNLOCKED;
16296 +static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports);
16297 +
16298 +__RXACCT_DECL(atomic_t rxrpc_transport_count);
16299 +LIST_HEAD(rxrpc_proc_transports);
16300 +DECLARE_RWSEM(rxrpc_proc_transports_sem);
16301 +
16302 +static void rxrpc_data_ready(struct sock *sk, int count);
16303 +static void rxrpc_error_report(struct sock *sk);
16304 +static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
16305 +                                       struct list_head *msgq);
16306 +static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans);
16307 +
16308 +/*****************************************************************************/
16309 +/*
16310 + * create a new transport endpoint using the specified UDP port
16311 + */
16312 +int rxrpc_create_transport(unsigned short port, struct rxrpc_transport **_trans)
16313 +{
16314 +       struct rxrpc_transport *trans;
16315 +       struct sockaddr_in sin;
16316 +       mm_segment_t oldfs;
16317 +       struct sock *sock;
16318 +       int ret, opt;
16319 +
16320 +       _enter("%hu",port);
16321 +
16322 +       trans = kmalloc(sizeof(struct rxrpc_transport),GFP_KERNEL);
16323 +       if (!trans)
16324 +               return -ENOMEM;
16325 +
16326 +       memset(trans,0,sizeof(struct rxrpc_transport));
16327 +       atomic_set(&trans->usage,1);
16328 +       INIT_LIST_HEAD(&trans->services);
16329 +       INIT_LIST_HEAD(&trans->link);
16330 +       INIT_LIST_HEAD(&trans->krxiodq_link);
16331 +       spin_lock_init(&trans->lock);
16332 +       INIT_LIST_HEAD(&trans->peer_active);
16333 +       INIT_LIST_HEAD(&trans->peer_graveyard);
16334 +       spin_lock_init(&trans->peer_gylock);
16335 +       init_waitqueue_head(&trans->peer_gy_waitq);
16336 +       rwlock_init(&trans->peer_lock);
16337 +       atomic_set(&trans->peer_count,0);
16338 +       trans->port = port;
16339 +
16340 +       /* create a UDP socket to be my actual transport endpoint */
16341 +       ret = sock_create(PF_INET,SOCK_DGRAM,IPPROTO_UDP,&trans->socket);
16342 +       if (ret<0)
16343 +               goto error;
16344 +
16345 +       /* use the specified port */
16346 +       if (port) {
16347 +               memset(&sin,0,sizeof(sin));
16348 +               sin.sin_family = AF_INET;
16349 +               sin.sin_port = htons(port);
16350 +               ret = trans->socket->ops->bind(trans->socket,(struct sockaddr *)&sin,sizeof(sin));
16351 +               if (ret<0)
16352 +                       goto error;
16353 +       }
16354 +
16355 +       opt = 1;
16356 +       oldfs = get_fs();
16357 +       set_fs(KERNEL_DS);
16358 +       ret = trans->socket->ops->setsockopt(trans->socket,SOL_IP,IP_RECVERR,
16359 +                                            (char*)&opt,sizeof(opt));
16360 +       set_fs(oldfs);
16361 +
16362 +       spin_lock(&rxrpc_transports_lock);
16363 +       list_add(&trans->link,&rxrpc_transports);
16364 +       spin_unlock(&rxrpc_transports_lock);
16365 +
16366 +       /* set the socket up */
16367 +       sock = trans->socket->sk;
16368 +       sock->user_data = trans;
16369 +       sock->data_ready = rxrpc_data_ready;
16370 +       sock->error_report = rxrpc_error_report;
16371 +
16372 +       down_write(&rxrpc_proc_transports_sem);
16373 +       list_add_tail(&trans->proc_link,&rxrpc_proc_transports);
16374 +       up_write(&rxrpc_proc_transports_sem);
16375 +
16376 +       __RXACCT(atomic_inc(&rxrpc_transport_count));
16377 +
16378 +       *_trans = trans;
16379 +       _leave(" = 0 (%p)",trans);
16380 +       return 0;
16381 +
16382 + error:
16383 +       rxrpc_put_transport(trans);
16384 +
16385 +       _leave(" = %d",ret);
16386 +
16387 +       return ret;
16388 +
16389 +} /* end rxrpc_create_transport() */
16390 +
16391 +/*****************************************************************************/
16392 +/*
16393 + * clear the connections on a transport endpoint
16394 + */
16395 +void rxrpc_clear_transport(struct rxrpc_transport *trans)
16396 +{
16397 +       //struct rxrpc_connection *conn;
16398 +
16399 +} /* end rxrpc_clear_transport() */
16400 +
16401 +/*****************************************************************************/
16402 +/*
16403 + * destroy a transport endpoint
16404 + */
16405 +void rxrpc_put_transport(struct rxrpc_transport *trans)
16406 +{
16407 +       _enter("%p{u=%d p=%hu}",trans,atomic_read(&trans->usage),trans->port);
16408 +
16409 +       if (atomic_read(&trans->usage)<=0)
16410 +               BUG();
16411 +
16412 +       /* to prevent a race, the decrement and the dequeue must be effectively atomic */
16413 +       spin_lock(&rxrpc_transports_lock);
16414 +       if (likely(!atomic_dec_and_test(&trans->usage))) {
16415 +               spin_unlock(&rxrpc_transports_lock);
16416 +               _leave("");
16417 +               return;
16418 +       }
16419 +
16420 +       list_del(&trans->link);
16421 +       spin_unlock(&rxrpc_transports_lock);
16422 +
16423 +       /* finish cleaning up the transport */
16424 +       if (trans->socket)
16425 +               trans->socket->ops->shutdown(trans->socket,2);
16426 +
16427 +       rxrpc_krxsecd_clear_transport(trans);
16428 +       rxrpc_krxiod_dequeue_transport(trans);
16429 +
16430 +       /* discard all peer information */
16431 +       rxrpc_peer_clearall(trans);
16432 +
16433 +       down_write(&rxrpc_proc_transports_sem);
16434 +       list_del(&trans->proc_link);
16435 +       up_write(&rxrpc_proc_transports_sem);
16436 +       __RXACCT(atomic_dec(&rxrpc_transport_count));
16437 +
16438 +       /* close the socket */
16439 +       if (trans->socket) {
16440 +               trans->socket->sk->user_data = NULL;
16441 +               sock_release(trans->socket);
16442 +               trans->socket = NULL;
16443 +       }
16444 +
16445 +       kfree(trans);
16446 +
16447 +       _leave("");
16448 +
16449 +} /* end rxrpc_put_transport() */
16450 +
16451 +/*****************************************************************************/
16452 +/*
16453 + * add a service to a transport to be listened upon
16454 + */
16455 +int rxrpc_add_service(struct rxrpc_transport *trans, struct rxrpc_service *newsrv)
16456 +{
16457 +       struct rxrpc_service *srv;
16458 +       struct list_head *_p;
16459 +       int ret = -EEXIST;
16460 +
16461 +       _enter("%p{%hu},%p{%hu}",trans,trans->port,newsrv,newsrv->service_id);
16462 +
16463 +       /* verify that the service ID is not already present */
16464 +       spin_lock(&trans->lock);
16465 +
16466 +       list_for_each(_p,&trans->services) {
16467 +               srv = list_entry(_p,struct rxrpc_service,link);
16468 +               if (srv->service_id==newsrv->service_id)
16469 +                       goto out;
16470 +       }
16471 +
16472 +       /* okay - add the service to the transport's service list */
16473 +       list_add_tail(&newsrv->link,&trans->services);
16474 +       rxrpc_get_transport(trans);
16475 +       ret = 0;
16476 +
16477 + out:
16478 +       spin_unlock(&trans->lock);
16479 +
16480 +       _leave("= %d",ret);
16481 +       return ret;
16482 +
16483 +} /* end rxrpc_add_service() */
16484 +
16485 +/*****************************************************************************/
16486 +/*
16487 + * remove a service from a transport
16488 + */
16489 +void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
16490 +{
16491 +       _enter("%p{%hu},%p{%hu}",trans,trans->port,srv,srv->service_id);
16492 +
16493 +       spin_lock(&trans->lock);
16494 +       list_del(&srv->link);
16495 +       spin_unlock(&trans->lock);
16496 +
16497 +       rxrpc_put_transport(trans);
16498 +
16499 +       _leave("");
16500 +
16501 +} /* end rxrpc_del_service() */
16502 +
16503 +/*****************************************************************************/
16504 +/*
16505 + * INET callback when data has been received on the socket.
16506 + */
16507 +static void rxrpc_data_ready(struct sock *sk, int count)
16508 +{
16509 +       struct rxrpc_transport *trans;
16510 +
16511 +       _enter("%p{t=%p},%d",sk,sk->user_data,count);
16512 +
16513 +       /* queue the transport for attention by krxiod */
16514 +       trans = (struct rxrpc_transport *) sk->user_data;
16515 +       if (trans)
16516 +               rxrpc_krxiod_queue_transport(trans);
16517 +
16518 +       /* wake up anyone waiting on the socket */
16519 +       if (sk->sleep && waitqueue_active(sk->sleep))
16520 +               wake_up_interruptible(sk->sleep);
16521 +
16522 +       _leave("");
16523 +
16524 +} /* end rxrpc_data_ready() */
16525 +
16526 +/*****************************************************************************/
16527 +/*
16528 + * INET callback when an ICMP error packet is received
16529 + * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE)
16530 + */
16531 +static void rxrpc_error_report(struct sock *sk)
16532 +{
16533 +       struct rxrpc_transport *trans;
16534 +
16535 +       _enter("%p{t=%p}",sk,sk->user_data);
16536 +
16537 +       /* queue the transport for attention by krxiod */
16538 +       trans = (struct rxrpc_transport *) sk->user_data;
16539 +       if (trans) {
16540 +               trans->error_rcvd = 1;
16541 +               rxrpc_krxiod_queue_transport(trans);
16542 +       }
16543 +
16544 +       /* wake up anyone waiting on the socket */
16545 +       if (sk->sleep && waitqueue_active(sk->sleep))
16546 +               wake_up_interruptible(sk->sleep);
16547 +
16548 +       _leave("");
16549 +
16550 +} /* end rxrpc_error_report() */
16551 +
16552 +/*****************************************************************************/
16553 +/*
16554 + * split a message up, allocating message records and filling them in from the contents of a
16555 + * socket buffer
16556 + */
16557 +static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
16558 +                             struct sk_buff *pkt,
16559 +                             struct list_head *msgq)
16560 +{
16561 +       struct rxrpc_message *msg;
16562 +       int ret;
16563 +
16564 +       _enter("");
16565 +
16566 +       msg = kmalloc(sizeof(struct rxrpc_message),GFP_KERNEL);
16567 +       if (!msg) {
16568 +               _leave(" = -ENOMEM");
16569 +               return -ENOMEM;
16570 +       }
16571 +
16572 +       memset(msg,0,sizeof(*msg));
16573 +       atomic_set(&msg->usage,1);
16574 +       list_add_tail(&msg->link,msgq);
16575 +
16576 +       /* dig out the Rx routing parameters */
16577 +       if (skb_copy_bits(pkt,sizeof(struct udphdr),&msg->hdr,sizeof(msg->hdr))<0) {
16578 +               ret = -EBADMSG;
16579 +               goto error;
16580 +       }
16581 +
16582 +       msg->trans = trans;
16583 +       msg->state = RXRPC_MSG_RECEIVED;
16584 +       msg->stamp = pkt->stamp;
16585 +       msg->seq = ntohl(msg->hdr.seq);
16586 +
16587 +       /* attach the packet */
16588 +       skb_get(pkt);
16589 +       msg->pkt = pkt;
16590 +
16591 +       msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header);
16592 +       msg->dsize = msg->pkt->len - msg->offset;
16593 +
16594 +       _net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
16595 +            msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
16596 +            ntohl(msg->hdr.epoch),
16597 +            (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
16598 +            ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
16599 +            ntohl(msg->hdr.callNumber),
16600 +            rxrpc_pkts[msg->hdr.type],
16601 +            msg->hdr.flags,
16602 +            ntohs(msg->hdr.serviceId),
16603 +            msg->hdr.securityIndex);
16604 +
16605 +       __RXACCT(atomic_inc(&rxrpc_message_count));
16606 +
16607 +       /* split off jumbo packets */
16608 +       while (msg->hdr.type==RXRPC_PACKET_TYPE_DATA && msg->hdr.flags & RXRPC_JUMBO_PACKET) {
16609 +               struct rxrpc_jumbo_header jumbo;
16610 +               struct rxrpc_message *jumbomsg = msg;
16611 +
16612 +               _debug("split jumbo packet");
16613 +
16614 +               /* quick sanity check */
16615 +               ret = -EBADMSG;
16616 +               if (msg->dsize < RXRPC_JUMBO_DATALEN+sizeof(struct rxrpc_jumbo_header))
16617 +                       goto error;
16618 +               if (msg->hdr.flags & RXRPC_LAST_PACKET)
16619 +                       goto error;
16620 +
16621 +               /* dig out the secondary header */
16622 +               if (skb_copy_bits(pkt,msg->offset+RXRPC_JUMBO_DATALEN,&jumbo,sizeof(jumbo))<0)
16623 +                       goto error;
16624 +
16625 +               /* allocate a new message record */
16626 +               ret = -ENOMEM;
16627 +               msg = kmalloc(sizeof(struct rxrpc_message),GFP_KERNEL);
16628 +               if (!msg)
16629 +                       goto error;
16630 +
16631 +               memcpy(msg,jumbomsg,sizeof(*msg));
16632 +               list_add_tail(&msg->link,msgq);
16633 +
16634 +               /* adjust the jumbo packet */
16635 +               jumbomsg->dsize = RXRPC_JUMBO_DATALEN;
16636 +
16637 +               /* attach the packet here too */
16638 +               skb_get(pkt);
16639 +
16640 +               /* adjust the parameters */
16641 +               msg->seq++;
16642 +               msg->hdr.seq = htonl(msg->seq);
16643 +               msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1);
16644 +               msg->offset += RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header);
16645 +               msg->dsize -= RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header);
16646 +               msg->hdr.flags = jumbo.flags;
16647 +               msg->hdr._rsvd = jumbo._rsvd;
16648 +
16649 +               _net("Rx Split jumbo packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
16650 +                    msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
16651 +                    ntohl(msg->hdr.epoch),
16652 +                    (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
16653 +                    ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
16654 +                    ntohl(msg->hdr.callNumber),
16655 +                    rxrpc_pkts[msg->hdr.type],
16656 +                    msg->hdr.flags,
16657 +                    ntohs(msg->hdr.serviceId),
16658 +                    msg->hdr.securityIndex);
16659 +
16660 +               __RXACCT(atomic_inc(&rxrpc_message_count));
16661 +       }
16662 +
16663 +       _leave(" = 0 #%d",atomic_read(&rxrpc_message_count));
16664 +       return 0;
16665 +
16666 + error:
16667 +       while (!list_empty(msgq)) {
16668 +               msg = list_entry(msgq->next,struct rxrpc_message,link);
16669 +               list_del_init(&msg->link);
16670 +
16671 +               rxrpc_put_message(msg);
16672 +       }
16673 +
16674 +       _leave(" = %d",ret);
16675 +       return ret;
16676 +} /* end rxrpc_incoming_msg() */
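The jumbo-splitting loop above advances through the datagram one data block plus one secondary header at a time, so each sub-packet's payload sits at a fixed, computable offset. A small helper expressing the same arithmetic (the helper itself and the <rxrpc/packet.h> location of the packet definitions are assumptions for illustration):

#include <linux/udp.h>
#include <rxrpc/packet.h>       /* assumed home of rxrpc_header et al. */

/* byte offset of the n'th sub-packet's payload within a received jumbo
 * datagram, counted from the UDP header as in rxrpc_incoming_msg() above
 * (n == 0 is the first sub-packet) */
static inline unsigned int rxrpc_jumbo_offset_example(unsigned int n)
{
        return sizeof(struct udphdr) +
               sizeof(struct rxrpc_header) +
               n * (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header));
}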
16677 +
16678 +/*****************************************************************************/
16679 +/*
16680 + * receive and process packets arriving on a transport endpoint
16681 + * - called from krxiod in process context
16682 + */
16683 +void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
16684 +{
16685 +       struct rxrpc_message *msg;
16686 +       struct rxrpc_peer *peer;
16687 +       struct sk_buff *pkt;
16688 +       int ret;
16689 +       u32 addr;
16690 +       u16 port;
16691 +
16692 +       LIST_HEAD(msgq);
16693 +
16694 +       _enter("%p{%d}",trans,trans->port);
16695 +
16696 +       for (;;) {
16697 +               /* deal with outstanding errors first */
16698 +               if (trans->error_rcvd)
16699 +                       rxrpc_trans_receive_error_report(trans);
16700 +
16701 +               /* attempt to receive a packet */
16702 +               pkt = skb_recv_datagram(trans->socket->sk,0,1,&ret);
16703 +               if (!pkt) {
16704 +                       if (ret==-EAGAIN) {
16705 +                               _leave(" EAGAIN");
16706 +                               return;
16707 +                       }
16708 +
16709 +                       /* an icmp error may have occurred */
16710 +                       rxrpc_krxiod_queue_transport(trans);
16711 +                       _leave(" error %d\n",ret);
16712 +                       return;
16713 +               }
16714 +
16715 +               /* we'll probably need to checksum it (didn't call sock_recvmsg) */
16716 +               if (pkt->ip_summed != CHECKSUM_UNNECESSARY) {
16717 +                       if ((unsigned short)csum_fold(skb_checksum(pkt,0,pkt->len,pkt->csum))) {
16718 +                               kfree_skb(pkt);
16719 +                               rxrpc_krxiod_queue_transport(trans);
16720 +                               _leave(" CSUM failed");
16721 +                               return;
16722 +                       }
16723 +               }
16724 +
16725 +               addr = pkt->nh.iph->saddr;
16726 +               port = pkt->h.uh->source;
16727 +
16728 +               _net("Rx Received UDP packet from %08x:%04hu",ntohl(addr),ntohs(port));
16729 +
16730 +               /* unmarshall the Rx parameters and split jumbo packets */
16731 +               ret = rxrpc_incoming_msg(trans,pkt,&msgq);
16732 +               if (ret<0) {
16733 +                       kfree_skb(pkt);
16734 +                       rxrpc_krxiod_queue_transport(trans);
16735 +                       _leave(" bad packet");
16736 +                       return;
16737 +               }
16738 +
16739 +               if (list_empty(&msgq)) BUG();
16740 +
16741 +               msg = list_entry(msgq.next,struct rxrpc_message,link);
16742 +
16743 +               /* locate the record for the peer from which it originated */
16744 +               ret = rxrpc_peer_lookup(trans,addr,&peer);
16745 +               if (ret<0) {
16746 +                       kdebug("Rx No connections from that peer");
16747 +                       rxrpc_trans_immediate_abort(trans,msg,-EINVAL);
16748 +                       goto finished_msg;
16749 +               }
16750 +
16751 +               /* try and find a matching connection */
16752 +               ret = rxrpc_connection_lookup(peer,msg,&msg->conn);
16753 +               if (ret<0) {
16754 +                       kdebug("Rx Unknown Connection");
16755 +                       rxrpc_trans_immediate_abort(trans,msg,-EINVAL);
16756 +                       rxrpc_put_peer(peer);
16757 +                       goto finished_msg;
16758 +               }
16759 +               rxrpc_put_peer(peer);
16760 +
16761 +               /* deal with the first packet of a new call */
16762 +               if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
16763 +                   msg->hdr.type==RXRPC_PACKET_TYPE_DATA &&
16764 +                   ntohl(msg->hdr.seq)==1
16765 +                   ) {
16766 +                       _debug("Rx New server call");
16767 +                       rxrpc_trans_receive_new_call(trans,&msgq);
16768 +                       goto finished_msg;
16769 +               }
16770 +
16771 +               /* deal with subsequent packet(s) of call */
16772 +               _debug("Rx Call packet");
16773 +               while (!list_empty(&msgq)) {
16774 +                       msg = list_entry(msgq.next,struct rxrpc_message,link);
16775 +                       list_del_init(&msg->link);
16776 +
16777 +                       ret = rxrpc_conn_receive_call_packet(msg->conn,NULL,msg);
16778 +                       if (ret<0) {
16779 +                               rxrpc_trans_immediate_abort(trans,msg,ret);
16780 +                               rxrpc_put_message(msg);
16781 +                               goto finished_msg;
16782 +                       }
16783 +
16784 +                       rxrpc_put_message(msg);
16785 +               }
16786 +
16787 +               goto finished_msg;
16788 +
16789 +               /* dispose of the packets */
16790 +       finished_msg:
16791 +               while (!list_empty(&msgq)) {
16792 +                       msg = list_entry(msgq.next,struct rxrpc_message,link);
16793 +                       list_del_init(&msg->link);
16794 +
16795 +                       rxrpc_put_message(msg);
16796 +               }
16797 +               kfree_skb(pkt);
16798 +       }
16799 +
16800 +       _leave("");
16801 +
16802 +} /* end rxrpc_trans_receive_packet() */
16803 +
16804 +/*****************************************************************************/
16805 +/*
16806 + * accept a new call from a client trying to connect to one of my services
16807 + * - called in process context
16808 + */
16809 +static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
16810 +                                       struct list_head *msgq)
16811 +{
16812 +       struct rxrpc_message *msg;
16813 +
16814 +       _enter("");
16815 +
16816 +       /* only bother with the first packet */
16817 +       msg = list_entry(msgq->next,struct rxrpc_message,link);
16818 +       list_del_init(&msg->link);
16819 +       rxrpc_krxsecd_queue_incoming_call(msg);
16820 +       rxrpc_put_message(msg);
16821 +
16822 +       _leave(" = 0");
16823 +
16824 +       return 0;
16825 +} /* end rxrpc_trans_receive_new_call() */
16826 +
16827 +/*****************************************************************************/
16828 +/*
16829 + * perform an immediate abort without connection or call structures
16830 + */
16831 +int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
16832 +                               struct rxrpc_message *msg,
16833 +                               int error)
16834 +{
16835 +       struct rxrpc_header ahdr;
16836 +       struct sockaddr_in sin;
16837 +       struct msghdr msghdr;
16838 +       struct iovec iov[2];
16839 +       mm_segment_t oldfs;
16840 +       int len, ret;
16841 +       u32 _error;
16842 +
16843 +       _enter("%p,%p,%d",trans,msg,error);
16844 +
16845 +       /* don't abort an abort packet */
16846 +       if (msg->hdr.type==RXRPC_PACKET_TYPE_ABORT) {
16847 +               _leave(" = 0");
16848 +               return 0;
16849 +       }
16850 +
16851 +       _error = htonl(-error);
16852 +
16853 +       /* set up the message to be transmitted */
16854 +       memcpy(&ahdr,&msg->hdr,sizeof(ahdr));
16855 +       ahdr.epoch      = msg->hdr.epoch;
16856 +       ahdr.serial     = htonl(1);
16857 +       ahdr.seq        = 0;
16858 +       ahdr.type       = RXRPC_PACKET_TYPE_ABORT;
16859 +       ahdr.flags      = RXRPC_LAST_PACKET | (~msg->hdr.flags & RXRPC_CLIENT_INITIATED);
16860 +
16861 +       iov[0].iov_len  = sizeof(ahdr);
16862 +       iov[0].iov_base = &ahdr;
16863 +       iov[1].iov_len  = sizeof(_error);
16864 +       iov[1].iov_base = &_error;
16865 +
16866 +       len = sizeof(ahdr) + sizeof(_error);
16867 +
16868 +       memset(&sin,0,sizeof(sin));
16869 +       sin.sin_family          = AF_INET;
16870 +       sin.sin_port            = msg->pkt->h.uh->source;
16871 +       sin.sin_addr.s_addr     = msg->pkt->nh.iph->saddr;
16872 +
16873 +       msghdr.msg_name         = &sin;
16874 +       msghdr.msg_namelen      = sizeof(sin);
16875 +       msghdr.msg_iov          = iov;
16876 +       msghdr.msg_iovlen       = 2;
16877 +       msghdr.msg_control      = NULL;
16878 +       msghdr.msg_controllen   = 0;
16879 +       msghdr.msg_flags        = MSG_DONTWAIT;
16880 +
16881 +       _net("Sending message type %d of %d bytes to %08x:%d",
16882 +            ahdr.type,
16883 +            len,
16884 +            htonl(sin.sin_addr.s_addr),
16885 +            htons(sin.sin_port));
16886 +
16887 +       /* send the message */
16888 +       oldfs = get_fs();
16889 +       set_fs(KERNEL_DS);
16890 +       ret = sock_sendmsg(trans->socket,&msghdr,len);
16891 +       set_fs(oldfs);
16892 +
16893 +       _leave(" = %d",ret);
16894 +       return ret;
16895 +} /* end rxrpc_trans_immediate_abort() */
16896 +
16897 +/*****************************************************************************/
16898 +/*
16899 + * receive an ICMP error report and percolate it to all connections heading to the affected
16900 + * host or port
16901 + */
16902 +static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
16903 +{
16904 +       struct rxrpc_connection *conn;
16905 +       struct sockaddr_in sin;
16906 +       struct rxrpc_peer *peer;
16907 +       struct list_head connq, *_p;
16908 +       struct errormsg emsg;
16909 +       struct msghdr msg;
16910 +       mm_segment_t oldfs;
16911 +       int local, err;
16912 +       u16 port;
16913 +
16914 +       _enter("%p",trans);
16915 +
16916 +       for (;;) {
16917 +               trans->error_rcvd = 0;
16918 +
16919 +               /* try and receive an error message */
16920 +               msg.msg_name    = &sin;
16921 +               msg.msg_namelen = sizeof(sin);
16922 +               msg.msg_iov     = NULL;
16923 +               msg.msg_iovlen  = 0;
16924 +               msg.msg_control = &emsg;
16925 +               msg.msg_controllen = sizeof(emsg);
16926 +               msg.msg_flags   = 0;
16927 +
16928 +               oldfs = get_fs();
16929 +               set_fs(KERNEL_DS);
16930 +               err = sock_recvmsg(trans->socket,&msg,0,MSG_ERRQUEUE|MSG_DONTWAIT|MSG_TRUNC);
16931 +               set_fs(oldfs);
16932 +
16933 +               if (err==-EAGAIN) {
16934 +                       _leave("");
16935 +                       return;
16936 +               }
16937 +
16938 +               if (err<0) {
16939 +                       printk("%s: unable to recv an error report: %d\n",__FUNCTION__,err);
16940 +                       _leave("");
16941 +                       return;
16942 +               }
16943 +
16944 +               msg.msg_controllen = (char*)msg.msg_control - (char*)&emsg;
16945 +
16946 +               if (msg.msg_controllen<sizeof(emsg.cmsg) || msg.msg_namelen<sizeof(sin)) {
16947 +                       printk("%s: short control message (nlen=%u clen=%u fl=%x)\n",
16948 +                              __FUNCTION__,msg.msg_namelen,msg.msg_controllen,msg.msg_flags);
16949 +                       continue;
16950 +               }
16951 +
16952 +               _net("Rx Received control message { len=%u level=%u type=%u }",
16953 +                    emsg.cmsg.cmsg_len,emsg.cmsg.cmsg_level,emsg.cmsg.cmsg_type);
16954 +
16955 +               if (sin.sin_family!=AF_INET) {
16956 +                       printk("Rx Ignoring error report with non-INET address (fam=%u)",
16957 +                              sin.sin_family);
16958 +                       continue;
16959 +               }
16960 +
16961 +               _net("Rx Received message pertaining to host addr=%x port=%hu",
16962 +                    ntohl(sin.sin_addr.s_addr),ntohs(sin.sin_port));
16963 +
16964 +               if (emsg.cmsg.cmsg_level!=SOL_IP || emsg.cmsg.cmsg_type!=IP_RECVERR) {
16965 +                       printk("Rx Ignoring unknown error report { level=%u type=%u }",
16966 +                              emsg.cmsg.cmsg_level,emsg.cmsg.cmsg_type);
16967 +                       continue;
16968 +               }
16969 +
16970 +               if (msg.msg_controllen<sizeof(emsg.cmsg)+sizeof(emsg.ee)) {
16971 +                       printk("%s: short error message (%u)\n",__FUNCTION__,msg.msg_controllen);
16972 +                       _leave("");
16973 +                       return;
16974 +               }
16975 +
16976 +               port = sin.sin_port;
16977 +
16978 +               switch (emsg.ee.ee_origin) {
16979 +               case SO_EE_ORIGIN_ICMP:
16980 +                       local = 0;
16981 +                       switch (emsg.ee.ee_type) {
16982 +                       case ICMP_DEST_UNREACH:
16983 +                               switch (emsg.ee.ee_code) {
16984 +                               case ICMP_NET_UNREACH:
16985 +                                       _net("Rx Received ICMP Network Unreachable");
16986 +                                       port = 0;
16987 +                                       err = -ENETUNREACH;
16988 +                                       break;
16989 +                               case ICMP_HOST_UNREACH:
16990 +                                       _net("Rx Received ICMP Host Unreachable");
16991 +                                       port = 0;
16992 +                                       err = -EHOSTUNREACH;
16993 +                                       break;
16994 +                               case ICMP_PORT_UNREACH:
16995 +                                       _net("Rx Received ICMP Port Unreachable");
16996 +                                       err = -ECONNREFUSED;
16997 +                                       break;
16998 +                               case ICMP_NET_UNKNOWN:
16999 +                                       _net("Rx Received ICMP Unknown Network");
17000 +                                       port = 0;
17001 +                                       err = -ENETUNREACH;
17002 +                                       break;
17003 +                               case ICMP_HOST_UNKNOWN:
17004 +                                       _net("Rx Received ICMP Unknown Host");
17005 +                                       port = 0;
17006 +                                       err = -EHOSTUNREACH;
17007 +                                       break;
17008 +                               default:
17009 +                                       _net("Rx Received ICMP DestUnreach { code=%u }",
17010 +                                            emsg.ee.ee_code);
17011 +                                       err = emsg.ee.ee_errno;
17012 +                                       break;
17013 +                               }
17014 +                               break;
17015 +
17016 +                       case ICMP_TIME_EXCEEDED:
17017 +                               _net("Rx Received ICMP TTL Exceeded");
17018 +                               err = emsg.ee.ee_errno;
17019 +                               break;
17020 +
17021 +                       default:
17022 +                               _proto("Rx Received ICMP error { type=%u code=%u }",
17023 +                                      emsg.ee.ee_type,emsg.ee.ee_code);
17024 +                               err = emsg.ee.ee_errno;
17025 +                               break;
17026 +                       }
17027 +                       break;
17028 +
17029 +               case SO_EE_ORIGIN_LOCAL:
17030 +                       _proto("Rx Received local error { error=%d }",emsg.ee.ee_errno);
17031 +                       local = 1;
17032 +                       err = emsg.ee.ee_errno;
17033 +                       break;
17034 +
17035 +               case SO_EE_ORIGIN_NONE:
17036 +               case SO_EE_ORIGIN_ICMP6:
17037 +               default:
17038 +                       _proto("Rx Received error report { orig=%u }",emsg.ee.ee_origin);
17039 +                       local = 0;
17040 +                       err = emsg.ee.ee_errno;
17041 +                       break;
17042 +               }
17043 +
17044 +               /* find all the connections between this transport and the affected destination */
17045 +               INIT_LIST_HEAD(&connq);
17046 +
17047 +               if (rxrpc_peer_lookup(trans,sin.sin_addr.s_addr,&peer)==0) {
17048 +                       read_lock(&peer->conn_lock);
17049 +                       list_for_each(_p,&peer->conn_active) {
17050 +                               conn = list_entry(_p,struct rxrpc_connection,link);
17051 +                               if (port && conn->addr.sin_port!=port)
17052 +                                       continue;
17053 +                               if (!list_empty(&conn->err_link))
17054 +                                       continue;
17055 +
17056 +                               rxrpc_get_connection(conn);
17057 +                               list_add_tail(&conn->err_link,&connq);
17058 +                       }
17059 +                       read_unlock(&peer->conn_lock);
17060 +
17061 +                       /* service all those connections */
17062 +                       while (!list_empty(&connq)) {
17063 +                               conn = list_entry(connq.next,struct rxrpc_connection,err_link);
17064 +                               list_del(&conn->err_link);
17065 +
17066 +                               rxrpc_conn_handle_error(conn,local,err);
17067 +
17068 +                               rxrpc_put_connection(conn);
17069 +                       }
17070 +
17071 +                       rxrpc_put_peer(peer);
17072 +               }
17073 +       }
17074 +
17075 +       _leave("");
17076 +       return;
17077 +} /* end rxrpc_trans_receive_error_report() */