1 diff -urNp linux-5240/fs/afs/cache.c linux-5250/fs/afs/cache.c
2 --- linux-5240/fs/afs/cache.c 1970-01-01 01:00:00.000000000 +0100
3 +++ linux-5250/fs/afs/cache.c
5 +/* cache.c: AFS local cache management
7 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
8 + * Written by David Howells (dhowells@redhat.com)
10 + * This program is free software; you can redistribute it and/or
11 + * modify it under the terms of the GNU General Public License
12 + * as published by the Free Software Foundation; either version
13 + * 2 of the License, or (at your option) any later version.
16 +#include <linux/kernel.h>
17 +#include <linux/module.h>
18 +#include <linux/init.h>
19 +#include <linux/slab.h>
20 +#include <linux/fs.h>
21 +#include <linux/namei.h>
22 +#include <linux/pagemap.h>
23 +#include <linux/devfs_fs_kernel.h>
24 +#include <linux/buffer_head.h>
26 +#include "cmservice.h"
27 +#include "fsclient.h"
31 +#include "internal.h"
33 +static LIST_HEAD(afs_cache_list);
34 +static DECLARE_MUTEX(afs_cache_list_sem);
36 +static int afs_cache_read_sig(afs_cache_t *cache);
38 +/*****************************************************************************/
40 + * stat a cache device to find its device numbers
42 +static int afs_cache_get_kdev(const char *cachename, kdev_t *_kdev, struct file **_bdfile)
44 + struct nameidata nd;
45 + struct inode *inode;
46 + struct file *bdfile;
49 + /* look up the cache device file */
53 + ret = path_lookup(cachename,LOOKUP_FOLLOW,&nd);
57 + /* check it's a block device file */
58 + inode = nd.dentry->d_inode;
60 + if (!S_ISBLK(inode->i_mode)) {
65 + /* open a file for it */
66 + bdfile = dentry_open(nd.dentry,nd.mnt,O_RDWR);
70 + *_kdev = inode->i_rdev;
73 +} /* end afs_cache_get_kdev() */
75 +/*****************************************************************************/
77 + * open a cache device
79 +int afs_cache_open(const char *cachename, afs_cache_t **_cache)
81 + struct list_head *_p;
82 + afs_cache_t *cache, *ncache;
86 + _enter("{%s}",cachename);
90 + /* pre-allocate a cache record */
92 + ncache = kmalloc(sizeof(*ncache),GFP_KERNEL);
94 + _leave(" = %d [lookup failed]",ret);
97 + memset(ncache,0,sizeof(*ncache));
99 + atomic_set(&ncache->usage,1);
100 + INIT_LIST_HEAD(&ncache->link);
101 + init_rwsem(&ncache->sem);
103 + /* lookup the block device */
104 + ret = afs_cache_get_kdev(cachename,&dev,&ncache->bdfile);
107 + _leave(" = %d [lookup failed]",ret);
113 + /* see if we've already got the cache open */
115 + down(&afs_cache_list_sem);
117 + list_for_each(_p,&afs_cache_list) {
118 + cache = list_entry(_p,afs_cache_t,link);
119 + if (kdev_same(cache->dev,dev))
124 + /* we already have the cache open */
126 + kdebug("kAFS re-using cache block dev %s",kdevname(dev));
127 + filp_close(cache->bdfile,NULL);
130 + afs_get_cache(cache);
133 + /* we don't already have the cache open */
135 + kdebug("kAFS using cache block dev %s",kdevname(dev));
139 + /* grab a handle to the block device */
141 + cache->bdev = bdget(kdev_t_to_nr(dev));
145 + /* open the block device node */
146 + ret = blkdev_get(cache->bdev,FMODE_READ|FMODE_WRITE,0,BDEV_RAW);
150 + /* quick insanity check */
151 + check_disk_change(cache->dev);
153 + if (is_read_only(cache->dev))
156 + /* mark it as mine */
157 + ret = bd_claim(cache->bdev,cache);
162 + ret = afs_cache_read_sig(cache);
166 + list_add_tail(&cache->link,&afs_cache_list);
170 + up(&afs_cache_list_sem);
171 + _leave(" = 0 (%p{%x})",cache->bdev,kdev_t_to_nr(cache->dev));
175 + bd_release(cache->bdev);
178 + filp_close(cache->bdfile,NULL);
180 + blkdev_put(cache->bdev,BDEV_RAW);
181 + cache->bdev = NULL;
186 + up(&afs_cache_list_sem);
187 + _leave(" = %d",ret);
190 +} /* end afs_cache_open() */
192 +/*****************************************************************************/
194 + * release a cache device
196 +void afs_put_cache(afs_cache_t *cache)
198 + _enter("%p{u=%d}",cache,atomic_read(&cache->usage));
200 + down(&afs_cache_list_sem);
202 + if (!atomic_dec_and_test(&cache->usage))
205 + list_del(&cache->link);
207 + up(&afs_cache_list_sem);
209 + /* if that was the last ref, then release the kernel resources */
211 + kdebug("kAFS releasing cache block dev %s",kdevname(cache->dev));
212 + filp_close(cache->bdfile,NULL);
213 + bd_release(cache->bdev);
214 + blkdev_put(cache->bdev,BDEV_RAW);
219 +} /* end afs_put_cache() */
221 +/*****************************************************************************/
223 + * read the cache signature block from the cache device
225 +static int afs_cache_read_sig(afs_cache_t *cache)
227 + struct afs_cache_super_block *csb;
228 + struct buffer_head *bh;
230 + bh = __bread(cache->bdev,0,PAGE_CACHE_SIZE);
234 + csb = (struct afs_cache_super_block*) bh->b_data;
236 + /* validate the cache superblock */
237 + if (memcmp(csb->magic,AFS_CACHE_SUPER_MAGIC,sizeof(csb->magic))!=0) {
238 + printk("kAFS cache magic string doesn't match\n");
241 + if (csb->endian!=AFS_CACHE_SUPER_ENDIAN) {
242 + printk("kAFS endian spec doesn't match (%hx not %hx)\n",
243 + csb->endian,AFS_CACHE_SUPER_ENDIAN);
246 + if (csb->version!=AFS_CACHE_SUPER_VERSION) {
247 + printk("kAFS version doesn't match (%u not %u)\n",
248 + csb->version,AFS_CACHE_SUPER_VERSION);
252 + /* copy the layout into the cache management structure */
253 + memcpy(&cache->layout,csb,sizeof(cache->layout));
257 +} /* end afs_cache_read_sig() */
259 +/*****************************************************************************/
261 + * update part of one page in the cache
262 + * - the caller must hold any required protective locks
263 + * - based on rw_swap_page_base()
265 +static int afs_cache_update_region(afs_cache_t *cache, afs_cache_bix_t bix,
266 + unsigned off, size_t size, void *buf)
268 + mm_segment_t oldfs;
272 + _enter("%s,%u,%u,%u,",kdevname(cache->dev),bix,off,size);
274 + pos = bix*cache->layout.bsize + off;
278 + ret = generic_file_write(cache->bdfile,buf,size,&pos);
284 + _leave(" = %d",ret);
286 +} /* end afs_cache_update_region() */
288 +/*****************************************************************************/
290 + * look up cell information in the cache
291 + * - mkafscache preloads /etc/sysconfig/kafs/cell-serv-db into the cache
293 +int afs_cache_lookup_cell(afs_cache_t *cache,
296 + struct afs_cache_cell_block *ccells;
297 + struct afs_cache_cell *ccell;
298 + struct buffer_head *bh;
299 + afs_cache_cellix_t cix, stop, rem;
300 + afs_cache_bix_t bix;
303 + _enter("%s,%s",kdevname(cache->dev),cell->name);
307 + rem = cache->layout.ncells;
309 + for (bix=cache->layout.off_cell_cache; bix<cache->layout.off_volume_bitmap; bix++) {
310 + /* read the next block */
311 + bh = __bread(cache->bdev,bix,PAGE_CACHE_SIZE);
313 + kleave(" = -EIO (block %u)",bix);
317 + ccells = (struct afs_cache_cell_block*) bh->b_data;
320 + stop = min((size_t)rem,
321 + sizeof(struct afs_cache_cell_block)/sizeof(struct afs_cache_cell));
324 + for (cix=0; cix<stop; cix++) {
325 + ccell = &ccells->entries[cix];
326 + if (strncmp(cell->name,ccell->name,sizeof(ccell->name))==0)
333 + _leave(" = -ENOENT");
337 + /* found the cell record - copy out the details */
338 + bix -= cache->layout.off_cell_cache;
339 + cell->cache_ix = cix;
340 + cell->cache_ix += bix * sizeof(struct afs_cache_cell_block)/sizeof(struct afs_cache_cell);
342 + memcpy(cell->vl_addrs,ccell->servers,sizeof(cell->vl_addrs));
344 + for (loop=0; loop<sizeof(cell->vl_addrs)/sizeof(cell->vl_addrs[0]); loop++)
345 + if (!cell->vl_addrs[loop].s_addr)
347 + cell->vl_naddrs = loop;
350 + _leave(" = 0 (bix=%u cix=%u ccix=%u)",bix,cix,cell->cache_ix);
353 +} /* end afs_cache_lookup_cell() */
355 +/*****************************************************************************/
357 + * search for a volume location record in the cache
359 +int afs_cache_lookup_vlocation(afs_vlocation_t *vlocation)
364 + struct afs_cache_volume_block *cvols;
365 + struct afs_cache_volume *cvol;
366 + struct buffer_head *bh;
367 + afs_cache_bix_t bix;
368 + unsigned rem, stop, ix;
370 + _enter("%s,{v=%s cix=%u}",
371 + kdevname(vlocation->cache->dev),vlocation->vldb.name,vlocation->vldb.cell_ix);
373 + rem = vlocation->cache->layout.nvols;
375 + for (bix=vlocation->cache->layout.off_volume_cache;
376 + bix<vlocation->cache->layout.off_vnode_bitmap;
379 + /* read the next block */
380 + bh = __bread(vlocation->cache->bdev,bix,PAGE_CACHE_SIZE);
382 + kleave(" = -EIO (block %u)",bix);
386 + cvols = (struct afs_cache_volume_block*) bh->b_data;
389 + stop = min((size_t)rem,sizeof(*cvols)/sizeof(*cvol));
392 + for (ix=0; ix<stop; ix++) {
393 + cvol = &cvols->entries[ix];
395 + _debug("FOUND[%u.%u]: cell %u vol '%s' %08x",
396 + bix,ix,cvol->cell_ix,cvol->name,cvol->vid[0]);
397 + if (cvol->cell_ix==vlocation->vldb.cell_ix &&
398 + memcmp(vlocation->vldb.name,cvol->name,sizeof(cvol->name))==0) {
406 + _leave(" = %d",-ENOENT);
410 + /* found the cell record */
411 + memcpy(&vlocation->vldb,cvol,sizeof(*cvol));
414 + /* note the volume ID */
415 + bix -= vlocation->cache->layout.off_volume_cache;
416 + vlocation->vix.index = (ix + bix * (sizeof(*cvols)/sizeof(*cvol))) << 2;
418 + _leave(" = 0 (bix=%u ix=%u vix=%hu)",bix,ix,vlocation->vix.index);
422 +} /* end afs_cache_lookup_vlocation() */
424 +/*****************************************************************************/
426 + * search for a volume location record in the cache, and if one's not available then reap the
427 + * eldest not currently in use
429 +int afs_cache_update_vlocation(afs_vlocation_t *vlocation)
434 + struct afs_cache_volume_block *cvols;
435 + struct afs_cache_volume *cvol;
436 + struct buffer_head *bh;
437 + afs_cache_bix_t bix;
438 + unsigned rem, stop, ix, candidate, tmp;
443 + _enter("%s,{v=%s cix=%u}",
444 + kdevname(vlocation->cache->dev),vlocation->vldb.name,vlocation->vldb.cell_ix);
446 + candidate = UINT_MAX;
447 + cand_age = ULONG_MAX;
448 + rem = vlocation->cache->layout.nvols;
450 + for (bix=vlocation->cache->layout.off_volume_cache;
451 + bix<vlocation->cache->layout.off_vnode_bitmap;
454 + /* read the next block */
455 + bh = __bread(vlocation->cache->bdev,bix,PAGE_CACHE_SIZE);
457 + kleave(" = -EIO (block %u)",bix);
461 + cvols = (struct afs_cache_volume_block*) bh->b_data;
464 + stop = min((size_t)rem,sizeof(*cvols)/sizeof(*cvol));
467 + for (ix=0; ix<stop; ix++) {
468 + cvol = &cvols->entries[ix];
470 + _debug("FOUND[%u.%u]: cell %u vol '%s' %08x",
471 + bix,ix,cvol->cell_ix,cvol->name,cvol->vid[0]);
472 + if (cvol->cell_ix==vlocation->vldb.cell_ix &&
473 + memcmp(vlocation->vldb.name,cvol->name,sizeof(cvol->name))==0) {
477 + if (candidate!=UINT_MAX && cvol->ctime<cand_age) {
478 + /* TODO: don't recycle volumes currently in use */
479 + cand_age = cvol->ctime;
480 + candidate = bix - vlocation->cache->layout.off_volume_cache;
481 + candidate = ix + candidate * sizeof(*cvols)/sizeof(*cvol);
488 + /* TODO: recycle old entry if no spare slots available */
489 + if (vlocation->cache->layout.nvols>=vlocation->cache->layout.maxvols)
492 + /* insert new entry */
493 + ix = vlocation->vix.index = vlocation->cache->layout.nvols++;
494 + tmp = (sizeof(*cvols)/sizeof(*cvol));
495 + bix = ix / tmp + vlocation->cache->layout.off_volume_cache;
498 + kdebug("INSERT (bix=%u ix=%u)",bix,ix);
499 + ret = afs_cache_update_region(vlocation->cache,
507 + /* update the superblock */
508 + ret = afs_cache_update_region(vlocation->cache,
510 + sizeof(vlocation->cache->layout),
511 + &vlocation->cache->layout);
513 + /* TODO: handle failure by winding back cache->layout.nvols */
516 + _leave(" = %d (bix=%u ix=%u vix=%hu)",ret,bix,ix,vlocation->vix.index);
522 + /* update the on-disk cache with the latest news */
523 + _debug("UPDATE (bix=%u ix=%u)",bix,ix);
524 + ret = afs_cache_update_region(vlocation->cache,
532 + /* found the cell record - note the volume ID */
533 + bix -= vlocation->cache->layout.off_volume_cache;
534 + vlocation->vix.index = (ix + bix * (sizeof(*cvols)/sizeof(*cvol))) << 2;
536 + _leave(" = 0 (bix=%u ix=%u vix=%hu)",bix,ix,vlocation->vix.index);
540 +} /* end afs_cache_update_vlocation() */
542 +/*****************************************************************************/
544 + * search for a vnode record in the cache, and if one's not available then reap the
545 + * eldest not currently in use
547 +int afs_cache_lookup_vnode(afs_volume_t *volume, afs_vnode_t *vnode)
552 + struct afs_cache_vnode_index_block *cindexb;
553 + struct afs_cache_vnode_index cindex;
554 + struct buffer_head *bh;
555 + afs_cache_bix_t bix;
556 + unsigned rem, stop, ix, candidate, tmp;
560 + _enter("{cix=%u vix=%u},{%u,%u,%u}",
561 + volume->cix,volume->vix.index,vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique);
563 + candidate = UINT_MAX;
564 + cand_age = ULONG_MAX;
565 + rem = volume->cache->layout.nvnodes;
567 + for (bix=volume->cache->layout.off_vnode_index;
568 + bix<volume->cache->layout.off_vnode_cache;
571 + /* read the next block */
572 + bh = __bread(volume->cache->bdev,bix,PAGE_CACHE_SIZE);
574 + kleave(" = -EIO (block %u)",bix);
578 + cindexb = (struct afs_cache_vnode_index_block*) bh->b_data;
581 + stop = min((size_t)rem,AFS_CACHE_VNODE_INDEX_PER_BLOCK);
584 + for (ix=0; ix<stop; ix++) {
585 + memcpy(&cindex,&cindexb->index[ix],sizeof(cindex));
588 + if (cindex.vnode>0)
589 + kdebug("FOUND[%u.%u]: vix %u vnode %u",
590 + bix,ix,cindex.volume_ix.index,cindex.vnode);
593 + if (cindex.vnode==vnode->fid.vnode &&
594 + cindex.volume_ix.index==volume->vix.index)
597 + if (candidate!=UINT_MAX && cindex.atime<cand_age) {
598 + /* TODO: don't recycle volumes currently in use */
599 + cand_age = cindex.atime;
600 + candidate = bix - volume->cache->layout.off_vnode_index;
601 + candidate = ix + candidate * AFS_CACHE_VNODE_INDEX_PER_BLOCK;
608 + /* TODO: recycle old entry if no spare slots available */
609 + if (volume->cache->layout.nvnodes>=volume->cache->layout.maxvnodes)
612 + /* append new entry */
613 + vnode->nix = volume->cache->layout.nvnodes++;
615 + cindex.vnode = vnode->fid.vnode;
616 + cindex.atime = xtime.tv_sec;
617 + cindex.volume_ix = volume->vix;
620 + tmp = AFS_CACHE_VNODE_INDEX_PER_BLOCK;
621 + bix = ix / tmp + volume->cache->layout.off_vnode_index;
624 + _debug("CACHE APPEND VNODE %u (bix=%u ix=%u)",vnode->nix,bix,ix);
625 + ret = afs_cache_update_region(volume->cache,
633 + /* update the superblock */
634 + ret = afs_cache_update_region(volume->cache,
636 + sizeof(volume->cache->layout),
637 + &volume->cache->layout);
639 + /* TODO: handle failure by winding back cache->layout.nvnodes */
642 + _leave(" = %d (bix=%u ix=%u nix=%u)",ret,bix,ix,vnode->nix);
648 + cindex.atime = xtime.tv_sec;
650 + /* update the on-disk cache with the latest news */
651 + _debug("UPDATE (bix=%u ix=%u)",bix,ix);
652 + ret = afs_cache_update_region(volume->cache,
660 + /* found the cell record - note the volume ID */
661 + bix -= volume->cache->layout.off_vnode_index;
662 + vnode->nix = ix + bix * AFS_CACHE_VNODE_INDEX_PER_BLOCK;
664 + _leave(" = 0 (bix=%u ix=%u nix=%u)",bix,ix,vnode->nix);
668 +} /* end afs_cache_lookup_vnode() */
669 diff -urNp linux-5240/fs/afs/cache.h linux-5250/fs/afs/cache.h
670 --- linux-5240/fs/afs/cache.h 1970-01-01 01:00:00.000000000 +0100
671 +++ linux-5250/fs/afs/cache.h
673 +/* cache.h: AFS local cache management
675 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
676 + * Written by David Howells (dhowells@redhat.com)
678 + * This program is free software; you can redistribute it and/or
679 + * modify it under the terms of the GNU General Public License
680 + * as published by the Free Software Foundation; either version
681 + * 2 of the License, or (at your option) any later version.
684 +#ifndef _LINUX_AFS_CACHE_H
685 +#define _LINUX_AFS_CACHE_H
687 +#include <linux/fs.h>
688 +#include "cache-layout.h"
692 +/*****************************************************************************/
694 + * AFS cache management record
698 + atomic_t usage; /* usage count */
699 + struct list_head link; /* link in cache list */
700 + kdev_t dev; /* device numbers */
701 + struct block_device *bdev; /* block device */
702 + struct file *bdfile; /* file attached to block device */
703 + struct rw_semaphore sem; /* access semaphore */
704 + struct afs_cache_super_block layout; /* layout description */
707 +extern int afs_cache_open(const char *name, afs_cache_t **_cache);
709 +#define afs_get_cache(C) do { atomic_inc(&(C)->usage); } while(0)
711 +extern void afs_put_cache(afs_cache_t *cache);
713 +extern int afs_cache_lookup_cell(afs_cache_t *cache, afs_cell_t *cell);
714 +extern int afs_cache_lookup_vlocation(afs_vlocation_t *vlocation);
715 +extern int afs_cache_update_vlocation(afs_vlocation_t *vlocation);
716 +extern int afs_cache_lookup_vnode(afs_volume_t *volume, afs_vnode_t *vnode);
718 +#endif /* __KERNEL__ */
720 +#endif /* _LINUX_AFS_CACHE_H */
721 diff -urNp linux-5240/fs/afs/cache-layout.h linux-5250/fs/afs/cache-layout.h
722 --- linux-5240/fs/afs/cache-layout.h 1970-01-01 01:00:00.000000000 +0100
723 +++ linux-5250/fs/afs/cache-layout.h
725 +/* cache-layout.h: AFS cache layout
727 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
728 + * Written by David Howells (dhowells@redhat.com)
730 + * This program is free software; you can redistribute it and/or
731 + * modify it under the terms of the GNU General Public License
732 + * as published by the Free Software Foundation; either version
733 + * 2 of the License, or (at your option) any later version.
736 + * The cache is stored on a block device and is laid out as follows:
738 + * 0 +------------------------------------------------
742 + * 1 +------------------------------------------------
744 + * | Cell Cache (preloaded by mkafscache)
746 + * +------------------------------------------------
748 + * | Volume Cache Allocation BitMap (1 page)
750 + * +------------------------------------------------
754 + * +------------------------------------------------
756 + * | Vnode Cache Allocation BitMap
758 + * +------------------------------------------------
760 + * | Vnode Cache Index
762 + * +------------------------------------------------
766 + * +------------------------------------------------
768 + * | Data Cache Allocation BitMap
770 + * +------------------------------------------------
774 + * End +------------------------------------------------
778 +#ifndef _LINUX_AFS_CACHE_LAYOUT_H
779 +#define _LINUX_AFS_CACHE_LAYOUT_H
783 +typedef unsigned afs_cache_bix_t;
784 +typedef unsigned short afs_cache_cellix_t;
786 +typedef struct { unsigned short index; } afs_cache_volix_t;
788 +/*****************************************************************************/
790 + * cache superblock block layout
792 +struct afs_cache_super_block
794 + char magic[10]; /* magic number */
795 +#define AFS_CACHE_SUPER_MAGIC "kafscache"
797 + unsigned short endian; /* 0x1234 stored CPU-normal order */
798 +#define AFS_CACHE_SUPER_ENDIAN 0x1234
800 + unsigned version; /* format version */
801 +#define AFS_CACHE_SUPER_VERSION 1
804 + afs_cache_cellix_t ncells; /* number of cells cached */
805 + afs_cache_cellix_t maxcells; /* max number of cells cacheable */
806 + afs_cache_cellix_t thiscell; /* index of this cell in cache */
807 + unsigned short nvols; /* volume cache usage */
808 + unsigned short maxvols; /* maximum number of volumes cacheable */
809 + unsigned nvnodes; /* vnode cache usage */
810 + unsigned maxvnodes; /* maximum number of vnodes cacheable */
813 + unsigned bsize; /* cache block size */
814 + afs_cache_bix_t off_cell_cache; /* block offset of cell cache */
815 + afs_cache_bix_t off_volume_bitmap; /* block offset of volume alloc bitmap */
816 + afs_cache_bix_t off_volume_cache; /* block offset of volume cache */
817 + afs_cache_bix_t off_vnode_bitmap; /* block offset of vnode alloc bitmap */
818 + afs_cache_bix_t off_vnode_index; /* block offset of vnode index */
819 + afs_cache_bix_t off_vnode_cache; /* block offset of vnode cache */
820 + afs_cache_bix_t off_data_bitmap; /* block offset of data bitmap */
821 + afs_cache_bix_t off_data_cache; /* block offset of data cache */
822 + afs_cache_bix_t off_end; /* block offset of end of cache */
825 +/*****************************************************************************/
829 +struct afs_cache_cell
831 + char name[64]; /* cell name (padded with NULs) */
832 + struct in_addr servers[16]; /* cached cell servers */
835 +struct afs_cache_cell_block
837 + struct afs_cache_cell entries[PAGE_SIZE/sizeof(struct afs_cache_cell)];
840 +/*****************************************************************************/
842 + * cached volume info
843 + * - indexed by (afs_cache_volix_t/4)
844 + * - (afs_cache_volix_t%4) is 0 for R/W, 1 for R/O and 2 for Bak (3 is not used)
846 +struct afs_cache_volume
848 + char name[64]; /* volume name (padded with NULs) */
849 + afs_volid_t vid[3]; /* volume IDs for R/W, R/O and Bak volumes */
850 + unsigned char vidmask; /* voltype mask for vid[] */
851 + unsigned char _pad[1];
852 + unsigned short nservers; /* number of entries used in servers[] */
853 + struct in_addr servers[8]; /* fileserver addresses */
854 + unsigned char srvtmask[8]; /* voltype masks for servers[] */
855 +#define AFS_CACHE_VOL_STM_RW 0x01 /* server holds a R/W version of the volume */
856 +#define AFS_CACHE_VOL_STM_RO 0x02 /* server holds a R/O version of the volume */
857 +#define AFS_CACHE_VOL_STM_BAK 0x04 /* server holds a backup version of the volume */
859 + afs_cache_cellix_t cell_ix; /* cell cache index (MAX_UINT if unused) */
860 + time_t ctime; /* time at which cached */
863 +struct afs_cache_volume_block
865 + struct afs_cache_volume entries[PAGE_SIZE/sizeof(struct afs_cache_volume)];
868 +/*****************************************************************************/
870 + * cached vnode index
871 + * - map on a 1:1 basis with the vnode index table
873 +struct afs_cache_vnode_index
875 + afs_vnodeid_t vnode; /* vnode ID */
876 + time_t atime; /* last time accessed */
877 + afs_cache_volix_t volume_ix; /* volume cache index */
878 +} __attribute__((packed));
880 +#define AFS_CACHE_VNODE_INDEX_PER_BLOCK ((size_t)(PAGE_SIZE/sizeof(struct afs_cache_vnode_index)))
882 +struct afs_cache_vnode_index_block
884 + struct afs_cache_vnode_index index[AFS_CACHE_VNODE_INDEX_PER_BLOCK];
887 +/*****************************************************************************/
889 + * cached vnode rights entry
891 +struct afs_cache_rights
895 + unsigned short mode;
896 +} __attribute__((packed));
898 +/*****************************************************************************/
900 + * vnode (inode) metadata cache
901 + * - PAGE_SIZE in size
903 +struct afs_cache_vnode_block
906 + unsigned unique; /* FID unique */
909 + afs_file_type_t type; /* file type */
910 + unsigned nlink; /* link count */
911 + size_t size; /* file size */
912 + afs_dataversion_t version; /* current data version */
913 + unsigned author; /* author ID */
914 + unsigned owner; /* owner ID */
915 + unsigned anon_access; /* access rights for unauthenticated caller */
916 + unsigned short mode; /* UNIX mode */
917 + time_t mtime; /* last time server changed data */
918 + time_t cachetime; /* time at which cached */
920 + /* file contents */
921 + afs_cache_bix_t pt0_bix; /* "page table 0" block index */
922 + afs_cache_bix_t pgd_bix; /* "page directory" block index */
924 + /* access rights */
925 + size_t nrights; /* number of cached rights */
926 + struct afs_cache_rights rights[0]; /* cached access rights buffer */
929 +#define AFS_CACHE_VNODE_MAXRIGHTS \
930 + ((PAGE_SIZE - sizeof(struct afs_cache_vnode_block)) / sizeof(struct afs_cache_rights))
932 +/*****************************************************************************/
934 + * vnode data "page directory" block
935 + * - first 1024 pages don't map through here
936 + * - PAGE_SIZE in size
938 +struct afs_cache_pgd_block
941 + afs_cache_bix_t pt_bix[1023]; /* "page table" block indices */
944 +/*****************************************************************************/
946 + * vnode data "page table" block
947 + * - PAGE_SIZE in size
949 +struct afs_cache_pt_block
951 + afs_cache_bix_t page_bix[1024]; /* "page" block indices */
955 +#endif /* _LINUX_AFS_CACHE_LAYOUT_H */
956 diff -urNp linux-5240/fs/afs/callback.c linux-5250/fs/afs/callback.c
957 --- linux-5240/fs/afs/callback.c 1970-01-01 01:00:00.000000000 +0100
958 +++ linux-5250/fs/afs/callback.c
961 + * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
963 + * This software may be freely redistributed under the terms of the
964 + * GNU General Public License.
966 + * You should have received a copy of the GNU General Public License
967 + * along with this program; if not, write to the Free Software
968 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
970 + * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
971 + * David Howells <dhowells@redhat.com>
975 +#include <linux/kernel.h>
976 +#include <linux/module.h>
977 +#include <linux/init.h>
980 +#include "internal.h"
982 +/*****************************************************************************/
984 + * allow the fileserver to request callback state (re-)initialisation
986 +int SRXAFSCM_InitCallBackState(afs_server_t *server)
988 + struct list_head callbacks;
990 + _enter("%p",server);
992 + INIT_LIST_HEAD(&callbacks);
994 + /* transfer the callback list from the server to a temp holding area */
995 + spin_lock(&server->cb_lock);
997 + list_add(&callbacks,&server->cb_promises);
998 + list_del_init(&server->cb_promises);
1000 + /* munch our way through the list, grabbing the inode, dropping all the locks and regetting
1001 + * them in the right order
1003 + while (!list_empty(&callbacks)) {
1004 + struct inode *inode;
1005 + afs_vnode_t *vnode;
1007 + vnode = list_entry(callbacks.next,afs_vnode_t,cb_link);
1008 + list_del_init(&vnode->cb_link);
1010 + /* try and grab the inode - may fail */
1011 + inode = igrab(AFS_VNODE_TO_I(vnode));
1015 + spin_unlock(&server->cb_lock);
1016 + spin_lock(&vnode->lock);
1018 + if (cmpxchg(&vnode->cb_server,server,NULL)==server) {
1019 + afs_kafstimod_del_timer(&vnode->cb_timeout);
1020 + spin_lock(&afs_cb_hash_lock);
1021 + list_del_init(&vnode->cb_hash_link);
1022 + spin_unlock(&afs_cb_hash_lock);
1026 + spin_unlock(&vnode->lock);
1029 + if (release) afs_put_server(server);
1031 + spin_lock(&server->cb_lock);
1035 + spin_unlock(&server->cb_lock);
1039 +} /* end SRXAFSCM_InitCallBackState() */
1041 +/*****************************************************************************/
1043 + * allow the fileserver to break callback promises
1045 +int SRXAFSCM_CallBack(afs_server_t *server, size_t count, afs_callback_t callbacks[])
1047 + struct list_head *_p;
1049 + _enter("%p,%u,",server,count);
1051 + for (; count>0; callbacks++, count--) {
1052 + struct inode *inode = NULL;
1053 + afs_vnode_t *vnode = NULL;
1056 + _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }",
1057 + callbacks->fid.vid,
1058 + callbacks->fid.vnode,
1059 + callbacks->fid.unique,
1060 + callbacks->version,
1061 + callbacks->expiry,
1065 + /* find the inode for this fid */
1066 + spin_lock(&afs_cb_hash_lock);
1068 + list_for_each(_p,&afs_cb_hash(server,&callbacks->fid)) {
1069 + vnode = list_entry(_p,afs_vnode_t,cb_hash_link);
1071 + if (memcmp(&vnode->fid,&callbacks->fid,sizeof(afs_fid_t))!=0)
1074 + /* right vnode, but is it same server? */
1075 + if (vnode->cb_server!=server)
1078 + /* try and nail the inode down */
1079 + inode = igrab(AFS_VNODE_TO_I(vnode));
1083 + spin_unlock(&afs_cb_hash_lock);
1086 + /* we've found the record for this vnode */
1087 + spin_lock(&vnode->lock);
1088 + if (cmpxchg(&vnode->cb_server,server,NULL)==server) {
1089 + /* the callback _is_ on the calling server */
1092 + afs_kafstimod_del_timer(&vnode->cb_timeout);
1093 + vnode->flags |= AFS_VNODE_CHANGED;
1095 + spin_lock(&server->cb_lock);
1096 + list_del_init(&vnode->cb_link);
1097 + spin_unlock(&server->cb_lock);
1099 + spin_lock(&afs_cb_hash_lock);
1100 + list_del_init(&vnode->cb_hash_link);
1101 + spin_unlock(&afs_cb_hash_lock);
1103 + spin_unlock(&vnode->lock);
1106 + invalidate_inode_pages(inode);
1107 + afs_put_server(server);
1115 +} /* end SRXAFSCM_CallBack() */
1117 +/*****************************************************************************/
1119 + * allow the fileserver to see if the cache manager is still alive
1121 +int SRXAFSCM_Probe(afs_server_t *server)
1123 + _debug("SRXAFSCM_Probe(%p)\n",server);
1125 +} /* end SRXAFSCM_Probe() */
1126 diff -urNp linux-5240/fs/afs/cell.c linux-5250/fs/afs/cell.c
1127 --- linux-5240/fs/afs/cell.c 1970-01-01 01:00:00.000000000 +0100
1128 +++ linux-5250/fs/afs/cell.c
1130 +/* cell.c: AFS cell and server record management
1132 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
1133 + * Written by David Howells (dhowells@redhat.com)
1135 + * This program is free software; you can redistribute it and/or
1136 + * modify it under the terms of the GNU General Public License
1137 + * as published by the Free Software Foundation; either version
1138 + * 2 of the License, or (at your option) any later version.
1141 +#include <linux/module.h>
1142 +#include <linux/sched.h>
1143 +#include <linux/slab.h>
1144 +#include <rxrpc/peer.h>
1145 +#include <rxrpc/connection.h>
1146 +#include "volume.h"
1148 +#include "server.h"
1149 +#include "transport.h"
1151 +#include "vlclient.h"
1152 +#include "kafstimod.h"
1154 +#include "internal.h"
1156 +DECLARE_RWSEM(afs_proc_cells_sem);
1157 +LIST_HEAD(afs_proc_cells);
1159 +static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells);
1160 +static rwlock_t afs_cells_lock = RW_LOCK_UNLOCKED;
1161 +static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */
1162 +static afs_cell_t *afs_cell_root;
1164 +static char *rootcell;
1166 +MODULE_PARM(rootcell,"s");
1167 +MODULE_PARM_DESC(rootcell,"root AFS cell name and VL server IP addr list");
1169 +/*****************************************************************************/
1171 + * create a cell record
1172 + * - "name" is the name of the cell
1173 + * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format
1175 +int afs_cell_create(const char *name, char *vllist, afs_cell_t **_cell)
1181 + _enter("%s",name);
1183 + if (!name) BUG(); /* TODO: want to look up "this cell" in the cache */
1185 + down_write(&afs_cells_sem);
1187 + /* allocate and initialise a cell record */
1188 + cell = kmalloc(sizeof(afs_cell_t) + strlen(name) + 1,GFP_KERNEL);
1190 + _leave(" = -ENOMEM");
1194 + memset(cell,0,sizeof(afs_cell_t));
1195 + atomic_set(&cell->usage,0);
1197 + INIT_LIST_HEAD(&cell->link);
1199 + rwlock_init(&cell->sv_lock);
1200 + INIT_LIST_HEAD(&cell->sv_list);
1201 + INIT_LIST_HEAD(&cell->sv_graveyard);
1202 + spin_lock_init(&cell->sv_gylock);
1204 + init_rwsem(&cell->vl_sem);
1205 + INIT_LIST_HEAD(&cell->vl_list);
1206 + INIT_LIST_HEAD(&cell->vl_graveyard);
1207 + spin_lock_init(&cell->vl_gylock);
1209 + strcpy(cell->name,name);
1211 + /* fill in the VL server list from the rest of the string */
1214 + unsigned a, b, c, d;
1216 + next = strchr(vllist,':');
1217 + if (next) *next++ = 0;
1219 + if (sscanf(vllist,"%u.%u.%u.%u",&a,&b,&c,&d)!=4)
1222 + if (a>255 || b>255 || c>255 || d>255)
1225 + cell->vl_addrs[cell->vl_naddrs++].s_addr =
1226 + htonl((a<<24)|(b<<16)|(c<<8)|d);
1228 + if (cell->vl_naddrs>=16)
1231 + } while(vllist=next, vllist);
1233 + /* add a proc dir for this cell */
1234 + ret = afs_proc_cell_setup(cell);
1238 + /* add to the cell lists */
1239 + write_lock(&afs_cells_lock);
1240 + list_add_tail(&cell->link,&afs_cells);
1241 + write_unlock(&afs_cells_lock);
1243 + down_write(&afs_proc_cells_sem);
1244 + list_add_tail(&cell->proc_link,&afs_proc_cells);
1245 + up_write(&afs_proc_cells_sem);
1248 + up_write(&afs_cells_sem);
1250 + _leave(" = 0 (%p)",cell);
1254 + printk("kAFS: bad VL server IP address: '%s'\n",vllist);
1256 + up_write(&afs_cells_sem);
1257 + kfree(afs_cell_root);
1259 +} /* end afs_cell_create() */
1261 +/*****************************************************************************/
1263 + * initialise the cell database from module parameters
1265 +int afs_cell_init(void)
1273 + printk("kAFS: no root cell specified\n");
1277 + cp = strchr(rootcell,':');
1279 + printk("kAFS: no VL server IP addresses specified\n");
1283 + /* allocate a cell record for the root cell */
1285 + ret = afs_cell_create(rootcell,cp,&afs_cell_root);
1287 + afs_get_cell(afs_cell_root);
1289 + _leave(" = %d",ret);
1292 +} /* end afs_cell_init() */
1294 +/*****************************************************************************/
1296 + * lookup a cell record
1298 +int afs_cell_lookup(afs_cache_t *cache, const char *name, afs_cell_t **_cell)
1300 + struct list_head *_p;
1303 + _enter("\"%s\",",name?name:"*thiscell*");
1305 + cell = afs_cell_root;
1308 + /* if the cell was named, look for it in the cell record list */
1310 + read_lock(&afs_cells_lock);
1312 + list_for_each(_p,&afs_cells) {
1313 + cell = list_entry(_p,afs_cell_t,link);
1314 + if (strcmp(cell->name,name)==0)
1319 + read_unlock(&afs_cells_lock);
1323 + afs_get_cell(cell);
1326 + _leave(" = %d (%p)",cell?0:-ENOENT,cell);
1327 + return cell ? 0 : -ENOENT;
1329 +} /* end afs_cell_lookup() */
1331 +/*****************************************************************************/
1333 + * try and get a cell record
1335 +afs_cell_t *afs_get_cell_maybe(afs_cell_t **_cell)
1339 + write_lock(&afs_cells_lock);
1342 + if (cell && !list_empty(&cell->link))
1343 + atomic_inc(&cell->usage);
1347 + write_unlock(&afs_cells_lock);
1350 +} /* end afs_get_cell_maybe() */
1352 +/*****************************************************************************/
1354 + * destroy a cell record
1356 +void afs_put_cell(afs_cell_t *cell)
1358 + _enter("%p{%d,%s}",cell,atomic_read(&cell->usage),cell->name);
1360 + /* sanity check */
1361 + if (atomic_read(&cell->usage)<=0)
1364 + /* to prevent a race, the decrement and the dequeue must be effectively atomic */
1365 + write_lock(&afs_cells_lock);
1367 + if (likely(!atomic_dec_and_test(&cell->usage))) {
1368 + write_unlock(&afs_cells_lock);
1373 + write_unlock(&afs_cells_lock);
1375 + if (!list_empty(&cell->sv_list)) BUG();
1376 + if (!list_empty(&cell->sv_graveyard)) BUG();
1377 + if (!list_empty(&cell->vl_list)) BUG();
1378 + if (!list_empty(&cell->vl_graveyard)) BUG();
1380 + _leave(" [unused]");
1381 +} /* end afs_put_cell() */
1383 +/*****************************************************************************/
1385 + * destroy a cell record
1387 +static void afs_cell_destroy(afs_cell_t *cell)
1389 + _enter("%p{%d,%s}",cell,atomic_read(&cell->usage),cell->name);
1391 + /* to prevent a race, the decrement and the dequeue must be effectively atomic */
1392 + write_lock(&afs_cells_lock);
1394 + /* sanity check */
1395 + if (atomic_read(&cell->usage)!=0)
1398 + list_del_init(&cell->link);
1400 + write_unlock(&afs_cells_lock);
1402 + down_write(&afs_cells_sem);
1404 + afs_proc_cell_remove(cell);
1406 + down_write(&afs_proc_cells_sem);
1407 + list_del_init(&afs_cell_root->proc_link);
1408 + up_write(&afs_proc_cells_sem);
1410 + up_write(&afs_cells_sem);
1412 + if (!list_empty(&cell->sv_list)) BUG();
1413 + if (!list_empty(&cell->sv_graveyard)) BUG();
1414 + if (!list_empty(&cell->vl_list)) BUG();
1415 + if (!list_empty(&cell->vl_graveyard)) BUG();
1417 + /* finish cleaning up the cell */
1420 + _leave(" [destroyed]");
1421 +} /* end afs_cell_destroy() */
1423 +/*****************************************************************************/
1425 + * lookup the server record corresponding to an Rx RPC peer
1427 +int afs_server_find_by_peer(const struct rxrpc_peer *peer, afs_server_t **_server)
1429 + struct list_head *_pc, *_ps;
1430 + afs_server_t *server;
1433 + _enter("%p{a=%08x},",peer,ntohl(peer->addr.s_addr));
1435 + /* search the cell list */
1436 + read_lock(&afs_cells_lock);
1438 + list_for_each(_pc,&afs_cells) {
1439 + cell = list_entry(_pc,afs_cell_t,link);
1441 + _debug("? cell %s",cell->name);
1443 + write_lock(&cell->sv_lock);
1445 + /* check the active list */
1446 + list_for_each(_ps,&cell->sv_list) {
1447 + server = list_entry(_ps,afs_server_t,link);
1449 + _debug("?? server %08x",ntohl(server->addr.s_addr));
1451 + if (memcmp(&server->addr,&peer->addr,sizeof(struct in_addr))==0)
1452 + goto found_server;
1455 + /* check the inactive list */
1456 + spin_lock(&cell->sv_gylock);
1457 + list_for_each(_ps,&cell->sv_graveyard) {
1458 + server = list_entry(_ps,afs_server_t,link);
1460 + _debug("?? dead server %08x",ntohl(server->addr.s_addr));
1462 + if (memcmp(&server->addr,&peer->addr,sizeof(struct in_addr))==0)
1463 + goto found_dead_server;
1465 + spin_unlock(&cell->sv_gylock);
1467 + write_unlock(&cell->sv_lock);
1469 + read_unlock(&afs_cells_lock);
1471 + _leave(" = -ENOENT");
1474 + /* we found it in the graveyard - resurrect it */
1475 + found_dead_server:
1476 + list_del(&server->link);
1477 + list_add_tail(&server->link,&cell->sv_list);
1478 + afs_get_server(server);
1479 + afs_kafstimod_del_timer(&server->timeout);
1480 + spin_unlock(&cell->sv_gylock);
1483 + /* we found it - increment its ref count and return it */
1485 + afs_get_server(server);
1488 + write_unlock(&cell->sv_lock);
1489 + read_unlock(&afs_cells_lock);
1491 + *_server = server;
1492 + _leave(" = 0 (s=%p c=%p)",server,cell);
1495 +} /* end afs_server_find_by_peer() */
1497 +/*****************************************************************************/
1499 + * purge in-memory cell database on module unload
1500 + * - the timeout daemon is stopped before calling this
1502 +void afs_cell_purge(void)
1504 + afs_vlocation_t *vlocation;
1509 + if (afs_cell_root)
1510 + afs_put_cell(afs_cell_root);
1512 + while (!list_empty(&afs_cells)) {
1515 + /* remove the next cell from the front of the list */
1516 + write_lock(&afs_cells_lock);
1518 + if (!list_empty(&afs_cells)) {
1519 + cell = list_entry(afs_cells.next,afs_cell_t,link);
1520 + list_del_init(&cell->link);
1523 + write_unlock(&afs_cells_lock);
1526 + _debug("PURGING CELL %s (%d)",cell->name,atomic_read(&cell->usage));
1528 + if (!list_empty(&cell->sv_list)) BUG();
1529 + if (!list_empty(&cell->vl_list)) BUG();
1531 + /* purge the cell's VL graveyard list */
1532 + _debug(" - clearing VL graveyard");
1534 + spin_lock(&cell->vl_gylock);
1536 + while (!list_empty(&cell->vl_graveyard)) {
1537 + vlocation = list_entry(cell->vl_graveyard.next,
1538 + afs_vlocation_t,link);
1539 + list_del_init(&vlocation->link);
1541 + afs_kafstimod_del_timer(&vlocation->timeout);
1543 + spin_unlock(&cell->vl_gylock);
1545 + afs_vlocation_do_timeout(vlocation);
1546 +#warning race if move to use krxtimod instead of kafstimod
1548 + spin_lock(&cell->vl_gylock);
1551 + spin_unlock(&cell->vl_gylock);
1553 + /* purge the cell's server graveyard list */
1554 + _debug(" - clearing server graveyard");
1556 + spin_lock(&cell->sv_gylock);
1558 + while (!list_empty(&cell->sv_graveyard)) {
1559 + afs_server_t *server;
1561 + server = list_entry(cell->sv_graveyard.next,afs_server_t,link);
1562 + list_del_init(&server->link);
1564 + afs_kafstimod_del_timer(&server->timeout);
1566 + spin_unlock(&cell->sv_gylock);
1568 + afs_server_do_timeout(server);
1570 + spin_lock(&cell->sv_gylock);
1573 + spin_unlock(&cell->sv_gylock);
1575 + /* now the cell should be left with no references */
1576 + afs_cell_destroy(cell);
1581 +} /* end afs_cell_purge() */
1582 diff -urNp linux-5240/fs/afs/cell.h linux-5250/fs/afs/cell.h
1583 --- linux-5240/fs/afs/cell.h 1970-01-01 01:00:00.000000000 +0100
1584 +++ linux-5250/fs/afs/cell.h
1586 +/* cell.h: AFS cell record
1588 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
1589 + * Written by David Howells (dhowells@redhat.com)
1591 + * This program is free software; you can redistribute it and/or
1592 + * modify it under the terms of the GNU General Public License
1593 + * as published by the Free Software Foundation; either version
1594 + * 2 of the License, or (at your option) any later version.
1597 +#ifndef _LINUX_AFS_CELL_H
1598 +#define _LINUX_AFS_CELL_H
1601 +#include "cache-layout.h"
1603 +extern volatile int afs_cells_being_purged; /* T when cells are being purged by rmmod */
1605 +/*****************************************************************************/
1612 + struct list_head link; /* main cell list link */
1613 + struct list_head proc_link; /* /proc cell list link */
1614 + struct proc_dir_entry *proc_dir; /* /proc dir for this cell */
1615 + afs_cache_cellix_t cache_ix; /* cell cache index */
1617 + /* server record management */
1618 + rwlock_t sv_lock; /* active server list lock */
1619 + struct list_head sv_list; /* active server list */
1620 + struct list_head sv_graveyard; /* inactive server list */
1621 + spinlock_t sv_gylock; /* inactive server list lock */
1623 + /* volume location record management */
1624 + struct rw_semaphore vl_sem; /* volume management serialisation semaphore */
1625 + struct list_head vl_list; /* cell's active VL record list */
1626 + struct list_head vl_graveyard; /* cell's inactive VL record list */
1627 + spinlock_t vl_gylock; /* graveyard lock */
1628 + unsigned short vl_naddrs; /* number of VL servers in addr list */
1629 + unsigned short vl_curr_svix; /* current server index */
1630 + struct in_addr vl_addrs[16]; /* cell VL server addresses */
1632 + char name[0]; /* cell name - must go last */
1635 +extern int afs_cell_init(void);
1637 +extern int afs_cell_create(const char *name, char *vllist, afs_cell_t **_cell);
1639 +extern int afs_cell_lookup(afs_cache_t *cache, const char *name, afs_cell_t **_cell);
1641 +#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
1643 +extern afs_cell_t *afs_get_cell_maybe(afs_cell_t **_cell);
1645 +extern void afs_put_cell(afs_cell_t *cell);
1647 +extern void afs_cell_purge(void);
1649 +#endif /* _LINUX_AFS_CELL_H */
1650 diff -urNp linux-5240/fs/afs/cmservice.c linux-5250/fs/afs/cmservice.c
1651 --- linux-5240/fs/afs/cmservice.c 1970-01-01 01:00:00.000000000 +0100
1652 +++ linux-5250/fs/afs/cmservice.c
1654 +/* cmservice.c: AFS Cache Manager Service
1656 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
1657 + * Written by David Howells (dhowells@redhat.com)
1659 + * This program is free software; you can redistribute it and/or
1660 + * modify it under the terms of the GNU General Public License
1661 + * as published by the Free Software Foundation; either version
1662 + * 2 of the License, or (at your option) any later version.
1665 +#include <linux/version.h>
1666 +#include <linux/module.h>
1667 +#include <linux/init.h>
1668 +#include <linux/sched.h>
1669 +#include <linux/completion.h>
1670 +#include "server.h"
1672 +#include "transport.h"
1673 +#include <rxrpc/rxrpc.h>
1674 +#include <rxrpc/transport.h>
1675 +#include <rxrpc/connection.h>
1676 +#include <rxrpc/call.h>
1677 +#include "cmservice.h"
1678 +#include "internal.h"
1680 +static unsigned afscm_usage; /* AFS cache manager usage count */
1681 +static struct rw_semaphore afscm_sem; /* AFS cache manager start/stop semaphore */
1683 +static int afscm_new_call(struct rxrpc_call *call);
1684 +static void afscm_attention(struct rxrpc_call *call);
1685 +static void afscm_error(struct rxrpc_call *call);
1686 +static void afscm_aemap(struct rxrpc_call *call);
1688 +static void _SRXAFSCM_CallBack(struct rxrpc_call *call);
1689 +static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call);
1690 +static void _SRXAFSCM_Probe(struct rxrpc_call *call);
1692 +typedef void (*_SRXAFSCM_xxxx_t)(struct rxrpc_call *call);
1694 +static const struct rxrpc_operation AFSCM_ops[] = {
1697 + asize: RXRPC_APP_MARK_EOF,
1699 + user: _SRXAFSCM_CallBack,
1703 + asize: RXRPC_APP_MARK_EOF,
1704 + name: "InitCallBackState",
1705 + user: _SRXAFSCM_InitCallBackState,
1709 + asize: RXRPC_APP_MARK_EOF,
1711 + user: _SRXAFSCM_Probe,
1716 + asize: RXRPC_APP_MARK_EOF,
1718 + user: _SRXAFSCM_GetLock,
1722 + asize: RXRPC_APP_MARK_EOF,
1724 + user: _SRXAFSCM_GetCE,
1728 + asize: RXRPC_APP_MARK_EOF,
1729 + name: "GetXStatsVersion",
1730 + user: _SRXAFSCM_GetXStatsVersion,
1734 + asize: RXRPC_APP_MARK_EOF,
1735 + name: "GetXStats",
1736 + user: _SRXAFSCM_GetXStats,
1741 +static struct rxrpc_service AFSCM_service = {
1743 + owner: THIS_MODULE,
1744 + link: LIST_HEAD_INIT(AFSCM_service.link),
1745 + new_call: afscm_new_call,
1747 + attn_func: afscm_attention,
1748 + error_func: afscm_error,
1749 + aemap_func: afscm_aemap,
1750 + ops_begin: &AFSCM_ops[0],
1751 + ops_end: &AFSCM_ops[sizeof(AFSCM_ops)/sizeof(AFSCM_ops[0])],
1754 +static DECLARE_COMPLETION(kafscmd_alive);
1755 +static DECLARE_COMPLETION(kafscmd_dead);
1756 +static DECLARE_WAIT_QUEUE_HEAD(kafscmd_sleepq);
1757 +static LIST_HEAD(kafscmd_attention_list);
1758 +static LIST_HEAD(afscm_calls);
1759 +static spinlock_t afscm_calls_lock = SPIN_LOCK_UNLOCKED;
1760 +static spinlock_t kafscmd_attention_lock = SPIN_LOCK_UNLOCKED;
1761 +static int kafscmd_die;
1763 +/*****************************************************************************/
1765 + * AFS Cache Manager kernel thread
1767 +static int kafscmd(void *arg)
1769 + DECLARE_WAITQUEUE(myself,current);
1771 + struct rxrpc_call *call;
1772 + _SRXAFSCM_xxxx_t func;
1775 + printk("kAFS: Started kafscmd %d\n",current->pid);
1776 + strcpy(current->comm,"kafscmd");
1780 + complete(&kafscmd_alive);
1782 + /* only certain signals are of interest */
1783 + spin_lock_irq(¤t->sigmask_lock);
1784 + siginitsetinv(¤t->blocked,0);
1785 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
1786 + recalc_sigpending();
1788 + recalc_sigpending(current);
1790 + spin_unlock_irq(¤t->sigmask_lock);
1792 + /* loop around looking for things to attend to */
1794 + if (list_empty(&kafscmd_attention_list)) {
1795 + set_current_state(TASK_INTERRUPTIBLE);
1796 + add_wait_queue(&kafscmd_sleepq,&myself);
1799 + set_current_state(TASK_INTERRUPTIBLE);
1800 + if (!list_empty(&kafscmd_attention_list) ||
1801 + signal_pending(current) ||
1808 + remove_wait_queue(&kafscmd_sleepq,&myself);
1809 + set_current_state(TASK_RUNNING);
1812 + die = kafscmd_die;
1814 + /* dequeue the next call requiring attention */
1816 + spin_lock(&kafscmd_attention_lock);
1818 + if (!list_empty(&kafscmd_attention_list)) {
1819 + call = list_entry(kafscmd_attention_list.next,
1820 + struct rxrpc_call,
1822 + list_del_init(&call->app_attn_link);
1826 + spin_unlock(&kafscmd_attention_lock);
1830 + _debug("@@@ Begin Attend Call %p",call);
1832 + func = call->app_user;
1836 + rxrpc_put_call(call);
1838 + _debug("@@@ End Attend Call %p",call);
1843 + /* and that's all */
1844 + complete_and_exit(&kafscmd_dead,0);
1846 +} /* end kafscmd() */
1848 +/*****************************************************************************/
1850 + * handle a call coming in to the cache manager
1851 + * - if I want to keep the call, I must increment its usage count
1852 + * - the return value will be negated and passed back in an abort packet if non-zero
1853 + * - serialised by virtue of there only being one krxiod
1855 +static int afscm_new_call(struct rxrpc_call *call)
1857 + _enter("%p{cid=%u u=%d}",call,ntohl(call->call_id),atomic_read(&call->usage));
1859 + rxrpc_get_call(call);
1861 + /* add to my current call list */
1862 + spin_lock(&afscm_calls_lock);
1863 + list_add(&call->app_link,&afscm_calls);
1864 + spin_unlock(&afscm_calls_lock);
1869 +} /* end afscm_new_call() */
1871 +/*****************************************************************************/
1873 + * queue on the kafscmd queue for attention
1875 +static void afscm_attention(struct rxrpc_call *call)
1877 + _enter("%p{cid=%u u=%d}",call,ntohl(call->call_id),atomic_read(&call->usage));
1879 + spin_lock(&kafscmd_attention_lock);
1881 + if (list_empty(&call->app_attn_link)) {
1882 + list_add_tail(&call->app_attn_link,&kafscmd_attention_list);
1883 + rxrpc_get_call(call);
1886 + spin_unlock(&kafscmd_attention_lock);
1888 + wake_up(&kafscmd_sleepq);
1890 + _leave(" {u=%d}",atomic_read(&call->usage));
1891 +} /* end afscm_attention() */
1893 +/*****************************************************************************/
1895 + * handle my call being aborted
1896 + * - clean up, dequeue and put my ref to the call
1898 +static void afscm_error(struct rxrpc_call *call)
1902 + _enter("%p{est=%s ac=%u er=%d}",
1904 + rxrpc_call_error_states[call->app_err_state],
1905 + call->app_abort_code,
1908 + spin_lock(&kafscmd_attention_lock);
1910 + if (list_empty(&call->app_attn_link)) {
1911 + list_add_tail(&call->app_attn_link,&kafscmd_attention_list);
1912 + rxrpc_get_call(call);
1915 + spin_unlock(&kafscmd_attention_lock);
1918 + spin_lock(&afscm_calls_lock);
1919 + if (!list_empty(&call->app_link)) {
1920 + list_del_init(&call->app_link);
1923 + spin_unlock(&afscm_calls_lock);
1926 + rxrpc_put_call(call);
1928 + wake_up(&kafscmd_sleepq);
1931 +} /* end afscm_error() */
1933 +/*****************************************************************************/
1935 + * map afs abort codes to/from Linux error codes
1936 + * - called with call->lock held
1938 +static void afscm_aemap(struct rxrpc_call *call)
1940 + switch (call->app_err_state) {
1941 + case RXRPC_ESTATE_LOCAL_ABORT:
1942 + call->app_abort_code = -call->app_errno;
1944 + case RXRPC_ESTATE_PEER_ABORT:
1945 + call->app_errno = -ECONNABORTED;
1950 +} /* end afscm_aemap() */
1952 +/*****************************************************************************/
1954 + * start the cache manager service if not already started
1956 +int afscm_start(void)
1960 + down_write(&afscm_sem);
1961 + if (!afscm_usage) {
1962 + ret = kernel_thread(kafscmd,NULL,0);
1966 + wait_for_completion(&kafscmd_alive);
1968 + ret = rxrpc_add_service(afs_transport,&AFSCM_service);
1974 + up_write(&afscm_sem);
1980 + wake_up(&kafscmd_sleepq);
1981 + wait_for_completion(&kafscmd_dead);
1984 + up_write(&afscm_sem);
1987 +} /* end afscm_start() */
1989 +/*****************************************************************************/
1991 + * stop the cache manager service
1993 +void afscm_stop(void)
1995 + struct rxrpc_call *call;
1997 + down_write(&afscm_sem);
1999 + if (afscm_usage==0) BUG();
2002 + if (afscm_usage==0) {
2003 + /* don't want more incoming calls */
2004 + rxrpc_del_service(afs_transport,&AFSCM_service);
2006 + /* abort any calls I've still got open (the afscm_error() will dequeue them) */
2007 + spin_lock(&afscm_calls_lock);
2008 + while (!list_empty(&afscm_calls)) {
2009 + call = list_entry(afscm_calls.next,struct rxrpc_call,app_link);
2010 + list_del_init(&call->app_link);
2011 + rxrpc_get_call(call);
2012 + spin_unlock(&afscm_calls_lock);
2014 + rxrpc_call_abort(call,-ESRCH); /* abort, dequeue and put */
2016 + rxrpc_put_call(call);
2018 + spin_lock(&afscm_calls_lock);
2020 + spin_unlock(&afscm_calls_lock);
2022 + /* get rid of my daemon */
2024 + wake_up(&kafscmd_sleepq);
2025 + wait_for_completion(&kafscmd_dead);
2027 + /* dispose of any calls waiting for attention */
2028 + spin_lock(&kafscmd_attention_lock);
2029 + while (!list_empty(&kafscmd_attention_list)) {
2030 + call = list_entry(kafscmd_attention_list.next,
2031 + struct rxrpc_call,
2034 + list_del_init(&call->app_attn_link);
2035 + spin_unlock(&kafscmd_attention_lock);
2037 + rxrpc_put_call(call);
2039 + spin_lock(&kafscmd_attention_lock);
2041 + spin_unlock(&kafscmd_attention_lock);
2044 + up_write(&afscm_sem);
2046 +} /* end afscm_stop() */
2048 +/*****************************************************************************/
2050 + * handle the fileserver breaking a set of callbacks
2052 +static void _SRXAFSCM_CallBack(struct rxrpc_call *call)
2054 + afs_server_t *server;
2055 + size_t count, qty, tmp;
2056 + int ret = 0, removed;
2058 + _enter("%p{acs=%s}",call,rxrpc_call_states[call->app_call_state]);
2060 + server = afs_server_get_from_peer(call->conn->peer);
2062 + switch (call->app_call_state) {
2063 + /* we've received the last packet
2064 + * - drain all the data from the call and send the reply
2066 + case RXRPC_CSTATE_SRVR_GOT_ARGS:
2068 + qty = call->app_ready_qty;
2069 + if (qty<8 || qty>50*(6*4)+8)
2073 + afs_callback_t *cb, *pcb;
2077 + fp = rxrpc_call_alloc_scratch(call,qty);
2079 + /* drag the entire argument block out to the scratch space */
2080 + ret = rxrpc_call_read_data(call,fp,qty,0);
2084 + /* and unmarshall the parameter block */
2086 + count = ntohl(*fp++);
2087 + if (count>AFSCBMAX ||
2088 + (count*(3*4)+8 != qty && count*(6*4)+8 != qty))
2091 + bp = fp + count*3;
2092 + tmp = ntohl(*bp++);
2093 + if (tmp>0 && tmp!=count)
2098 + pcb = cb = rxrpc_call_alloc_scratch_s(call,afs_callback_t);
2100 + for (loop=count-1; loop>=0; loop--) {
2101 + pcb->fid.vid = ntohl(*fp++);
2102 + pcb->fid.vnode = ntohl(*fp++);
2103 + pcb->fid.unique = ntohl(*fp++);
2105 + pcb->version = ntohl(*bp++);
2106 + pcb->expiry = ntohl(*bp++);
2107 + pcb->type = ntohl(*bp++);
2112 + pcb->type = AFSCM_CB_UNTYPED;
2117 + /* invoke the actual service routine */
2118 + ret = SRXAFSCM_CallBack(server,count,cb);
2123 + /* send the reply */
2124 + ret = rxrpc_call_write_data(call,0,NULL,RXRPC_LAST_PACKET,GFP_KERNEL,0,&count);
2129 + /* operation complete */
2130 + case RXRPC_CSTATE_COMPLETE:
2131 + call->app_user = NULL;
2133 + spin_lock(&afscm_calls_lock);
2134 + if (!list_empty(&call->app_link)) {
2135 + list_del_init(&call->app_link);
2138 + spin_unlock(&afscm_calls_lock);
2141 + rxrpc_put_call(call);
2144 + /* operation terminated on error */
2145 + case RXRPC_CSTATE_ERROR:
2146 + call->app_user = NULL;
2154 + rxrpc_call_abort(call,ret);
2156 + if (server) afs_put_server(server);
2158 + _leave(" = %d",ret);
2160 +} /* end _SRXAFSCM_CallBack() */
2162 +/*****************************************************************************/
2164 + * handle the fileserver asking us to initialise our callback state
2166 +static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call)
2168 + afs_server_t *server;
2170 + int ret = 0, removed;
2172 + _enter("%p{acs=%s}",call,rxrpc_call_states[call->app_call_state]);
2174 + server = afs_server_get_from_peer(call->conn->peer);
2176 + switch (call->app_call_state) {
2177 + /* we've received the last packet - drain all the data from the call */
2178 + case RXRPC_CSTATE_SRVR_GOT_ARGS:
2179 + /* shouldn't be any args */
2183 + /* send the reply when asked for it */
2184 + case RXRPC_CSTATE_SRVR_SND_REPLY:
2185 + /* invoke the actual service routine */
2186 + ret = SRXAFSCM_InitCallBackState(server);
2190 + ret = rxrpc_call_write_data(call,0,NULL,RXRPC_LAST_PACKET,GFP_KERNEL,0,&count);
2195 + /* operation complete */
2196 + case RXRPC_CSTATE_COMPLETE:
2197 + call->app_user = NULL;
2199 + spin_lock(&afscm_calls_lock);
2200 + if (!list_empty(&call->app_link)) {
2201 + list_del_init(&call->app_link);
2204 + spin_unlock(&afscm_calls_lock);
2207 + rxrpc_put_call(call);
2210 + /* operation terminated on error */
2211 + case RXRPC_CSTATE_ERROR:
2212 + call->app_user = NULL;
2220 + rxrpc_call_abort(call,ret);
2222 + if (server) afs_put_server(server);
2224 + _leave(" = %d",ret);
2226 +} /* end _SRXAFSCM_InitCallBackState() */
2228 +/*****************************************************************************/
2230 + * handle a probe from a fileserver
2232 +static void _SRXAFSCM_Probe(struct rxrpc_call *call)
2234 + afs_server_t *server;
2236 + int ret = 0, removed;
2238 + _enter("%p{acs=%s}",call,rxrpc_call_states[call->app_call_state]);
2240 + server = afs_server_get_from_peer(call->conn->peer);
2242 + switch (call->app_call_state) {
2243 + /* we've received the last packet - drain all the data from the call */
2244 + case RXRPC_CSTATE_SRVR_GOT_ARGS:
2245 + /* shouldn't be any args */
2249 + /* send the reply when asked for it */
2250 + case RXRPC_CSTATE_SRVR_SND_REPLY:
2251 + /* invoke the actual service routine */
2252 + ret = SRXAFSCM_Probe(server);
2256 + ret = rxrpc_call_write_data(call,0,NULL,RXRPC_LAST_PACKET,GFP_KERNEL,0,&count);
2261 + /* operation complete */
2262 + case RXRPC_CSTATE_COMPLETE:
2263 + call->app_user = NULL;
2265 + spin_lock(&afscm_calls_lock);
2266 + if (!list_empty(&call->app_link)) {
2267 + list_del_init(&call->app_link);
2270 + spin_unlock(&afscm_calls_lock);
2273 + rxrpc_put_call(call);
2276 + /* operation terminated on error */
2277 + case RXRPC_CSTATE_ERROR:
2278 + call->app_user = NULL;
2286 + rxrpc_call_abort(call,ret);
2288 + if (server) afs_put_server(server);
2290 + _leave(" = %d",ret);
2292 +} /* end _SRXAFSCM_Probe() */
2293 diff -urNp linux-5240/fs/afs/cmservice.h linux-5250/fs/afs/cmservice.h
2294 --- linux-5240/fs/afs/cmservice.h 1970-01-01 01:00:00.000000000 +0100
2295 +++ linux-5250/fs/afs/cmservice.h
2297 +/* cmservice.h: AFS Cache Manager Service declarations
2299 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
2300 + * Written by David Howells (dhowells@redhat.com)
2302 + * This program is free software; you can redistribute it and/or
2303 + * modify it under the terms of the GNU General Public License
2304 + * as published by the Free Software Foundation; either version
2305 + * 2 of the License, or (at your option) any later version.
2308 +#ifndef _LINUX_AFS_CMSERVICE_H
2309 +#define _LINUX_AFS_CMSERVICE_H
2311 +#include <rxrpc/transport.h>
2314 +/* cache manager start/stop */
2315 +extern int afscm_start(void);
2316 +extern void afscm_stop(void);
2318 +/* cache manager server functions */
2319 +extern int SRXAFSCM_InitCallBackState(afs_server_t *server);
2320 +extern int SRXAFSCM_CallBack(afs_server_t *server, size_t count, afs_callback_t callbacks[]);
2321 +extern int SRXAFSCM_Probe(afs_server_t *server);
2323 +#endif /* _LINUX_AFS_CMSERVICE_H */
2324 diff -urNp linux-5240/fs/afs/dir.c linux-5250/fs/afs/dir.c
2325 --- linux-5240/fs/afs/dir.c 1970-01-01 01:00:00.000000000 +0100
2326 +++ linux-5250/fs/afs/dir.c
2328 +/* dir.c: AFS filesystem directory handling
2330 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
2331 + * Written by David Howells (dhowells@redhat.com)
2333 + * This program is free software; you can redistribute it and/or
2334 + * modify it under the terms of the GNU General Public License
2335 + * as published by the Free Software Foundation; either version
2336 + * 2 of the License, or (at your option) any later version.
2339 +#include <linux/kernel.h>
2340 +#include <linux/module.h>
2341 +#include <linux/init.h>
2342 +#include <linux/sched.h>
2343 +#include <linux/slab.h>
2344 +#include <linux/fs.h>
2345 +#include <linux/pagemap.h>
2346 +#include <linux/smp_lock.h>
2348 +#include "volume.h"
2349 +#include <rxrpc/call.h>
2351 +#include "internal.h"
2353 +static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry);
2354 +static int afs_dir_open(struct inode *inode, struct file *file);
2355 +static int afs_dir_readdir(struct file *file, void *dirent, filldir_t filldir);
2356 +static int afs_d_revalidate(struct dentry *dentry, int flags);
2357 +static int afs_d_delete(struct dentry *dentry);
2358 +static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, loff_t fpos,
2359 + ino_t ino, unsigned dtype);
2361 +struct file_operations afs_dir_file_operations = {
2362 + open: afs_dir_open,
2363 + readdir: afs_dir_readdir,
2366 +struct inode_operations afs_dir_inode_operations = {
2367 + lookup: afs_dir_lookup,
2368 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
2369 + getattr: afs_inode_getattr,
2371 + revalidate: afs_inode_revalidate,
2373 +// create: afs_dir_create,
2374 +// link: afs_dir_link,
2375 +// unlink: afs_dir_unlink,
2376 +// symlink: afs_dir_symlink,
2377 +// mkdir: afs_dir_mkdir,
2378 +// rmdir: afs_dir_rmdir,
2379 +// mknod: afs_dir_mknod,
2380 +// rename: afs_dir_rename,
2383 +static struct dentry_operations afs_fs_dentry_operations = {
2384 + d_revalidate: afs_d_revalidate,
2385 + d_delete: afs_d_delete,
2388 +#define AFS_DIR_HASHTBL_SIZE 128
2389 +#define AFS_DIR_DIRENT_SIZE 32
2390 +#define AFS_DIRENT_PER_BLOCK 64
2392 +typedef struct afs_dirent {
2399 + u8 overflow[4]; /* if any char of the name (inc NUL) reaches here, consume
2400 + * the next dirent too */
2401 + u8 extended_name[32];
2404 +/* AFS directory page header (one at the beginning of every 2048-byte chunk) */
2405 +typedef struct afs_dir_pagehdr {
2408 +#define AFS_DIR_MAGIC htons(1234)
2412 +} afs_dir_pagehdr_t;
2414 +/* directory block layout */
2415 +typedef union afs_dir_block {
2417 + afs_dir_pagehdr_t pagehdr;
2420 + afs_dir_pagehdr_t pagehdr;
2421 + u8 alloc_ctrs[128];
2422 + u16 hashtable[AFS_DIR_HASHTBL_SIZE]; /* dir hash table */
2425 + afs_dirent_t dirents[AFS_DIRENT_PER_BLOCK];
2428 +/* layout on a linux VM page */
2429 +typedef struct afs_dir_page {
2430 + afs_dir_block_t blocks[PAGE_SIZE/sizeof(afs_dir_block_t)];
2433 +struct afs_dir_lookup_cookie {
2440 +/*****************************************************************************/
2442 + * check that a directory page is valid
2444 +static inline void afs_dir_check_page(struct inode *dir, struct page *page)
2446 + afs_dir_page_t *dbuf;
2451 + /* check the page count */
2452 + qty = desc.size/sizeof(dbuf->blocks[0]);
2456 + if (page->index==0 && qty!=ntohs(dbuf->blocks[0].pagehdr.npages)) {
2457 + printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n",
2458 + __FUNCTION__,dir->i_ino,qty,ntohs(dbuf->blocks[0].pagehdr.npages));
2463 + /* determine how many magic numbers there should be in this page */
2464 + latter = dir->i_size - (page->index << PAGE_CACHE_SHIFT);
2465 + if (latter >= PAGE_SIZE)
2469 + qty /= sizeof(afs_dir_block_t);
2472 + dbuf = page_address(page);
2473 + for (tmp=0; tmp<qty; tmp++) {
2474 + if (dbuf->blocks[tmp].pagehdr.magic != AFS_DIR_MAGIC) {
2475 + printk("kAFS: %s(%lu): bad magic %d/%d is %04hx\n",
2476 + __FUNCTION__,dir->i_ino,tmp,
2477 + qty,ntohs(dbuf->blocks[tmp].pagehdr.magic));
2482 + SetPageChecked(page);
2486 + SetPageChecked(page);
2487 + SetPageError(page);
2489 +} /* end afs_dir_check_page() */
2491 +/*****************************************************************************/
2493 + * discard a page cached in the pagecache
2495 +static inline void afs_dir_put_page(struct page *page)
2498 + page_cache_release(page);
2500 +} /* end afs_dir_put_page() */
2502 +/*****************************************************************************/
2504 + * get a page into the pagecache
2506 +static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
2508 + struct page *page;
2510 + _enter("{%lu},%lu",dir->i_ino,index);
2512 + page = read_cache_page(dir->i_mapping,index,
2513 + (filler_t*)dir->i_mapping->a_ops->readpage,NULL);
2514 + if (!IS_ERR(page)) {
2515 + wait_on_page_locked(page);
2517 + if (!PageUptodate(page))
2519 + if (!PageChecked(page))
2520 + afs_dir_check_page(dir,page);
2521 + if (PageError(page))
2527 + afs_dir_put_page(page);
2528 + return ERR_PTR(-EIO);
2529 +} /* end afs_dir_get_page() */
2531 +/*****************************************************************************/
2533 + * open an AFS directory file
2535 +static int afs_dir_open(struct inode *inode, struct file *file)
2537 + _enter("{%lu}",inode->i_ino);
2539 + if (sizeof(afs_dir_block_t) != 2048) BUG();
2540 + if (sizeof(afs_dirent_t) != 32) BUG();
2542 + if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED)
2548 +} /* end afs_dir_open() */
2550 +/*****************************************************************************/
2552 + * deal with one block in an AFS directory
2554 +static int afs_dir_iterate_block(unsigned *fpos,
2555 + afs_dir_block_t *block,
2558 + filldir_t filldir)
2560 + afs_dirent_t *dire;
2561 + unsigned offset, next, curr;
2565 + _enter("%u,%x,%p,,",*fpos,blkoff,block);
2567 + curr = (*fpos - blkoff) / sizeof(afs_dirent_t);
2569 + /* walk through the block, an entry at a time */
2570 + for (offset = AFS_DIRENT_PER_BLOCK - block->pagehdr.nentries;
2571 + offset < AFS_DIRENT_PER_BLOCK;
2574 + next = offset + 1;
2576 + /* skip entries marked unused in the bitmap */
2577 + if (!(block->pagehdr.bitmap[offset/8] & (1 << (offset % 8)))) {
2578 + _debug("ENT[%u.%u]: unused\n",blkoff/sizeof(afs_dir_block_t),offset);
2580 + *fpos = blkoff + next * sizeof(afs_dirent_t);
2584 + /* got a valid entry */
2585 + dire = &block->dirents[offset];
2586 + nlen = strnlen(dire->name,sizeof(*block) - offset*sizeof(afs_dirent_t));
2588 + _debug("ENT[%u.%u]: %s %u \"%.*s\"\n",
2589 + blkoff/sizeof(afs_dir_block_t),offset,
2590 + offset<curr ? "skip" : "fill",
2591 + nlen,nlen,dire->name);
2593 + /* work out where the next possible entry is */
2594 + for (tmp=nlen; tmp>15; tmp-=sizeof(afs_dirent_t)) {
2595 + if (next>=AFS_DIRENT_PER_BLOCK) {
2596 + _debug("ENT[%u.%u]:"
2597 + " %u travelled beyond end dir block (len %u/%u)\n",
2598 + blkoff/sizeof(afs_dir_block_t),offset,next,tmp,nlen);
2601 + if (!(block->pagehdr.bitmap[next/8] & (1 << (next % 8)))) {
2602 + _debug("ENT[%u.%u]: %u unmarked extension (len %u/%u)\n",
2603 + blkoff/sizeof(afs_dir_block_t),offset,next,tmp,nlen);
2607 + _debug("ENT[%u.%u]: ext %u/%u\n",
2608 + blkoff/sizeof(afs_dir_block_t),next,tmp,nlen);
2612 + /* skip if starts before the current position */
2616 + /* found the next entry */
2617 + ret = filldir(cookie,
2620 + blkoff + offset * sizeof(afs_dirent_t),
2621 + ntohl(dire->vnode),
2622 + filldir==afs_dir_lookup_filldir ? dire->unique : DT_UNKNOWN);
2624 + _leave(" = 0 [full]");
2628 + *fpos = blkoff + next * sizeof(afs_dirent_t);
2631 + _leave(" = 1 [more]");
2633 +} /* end afs_dir_iterate_block() */
2635 +/*****************************************************************************/
2637 + * read an AFS directory
2639 +static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, filldir_t filldir)
2641 + afs_dir_block_t *dblock;
2642 + afs_dir_page_t *dbuf;
2643 + struct page *page;
2644 + unsigned blkoff, limit;
2647 + _enter("{%lu},%u,,",dir->i_ino,*fpos);
2649 + if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
2650 + _leave(" = -ESTALE");
2654 + /* round the file position up to the next entry boundary */
2655 + *fpos += sizeof(afs_dirent_t) - 1;
2656 + *fpos &= ~(sizeof(afs_dirent_t) - 1);
2658 + /* walk through the blocks in sequence */
2660 + while (*fpos < dir->i_size) {
2661 + blkoff = *fpos & ~(sizeof(afs_dir_block_t) - 1);
2663 + /* fetch the appropriate page from the directory */
2664 + page = afs_dir_get_page(dir,blkoff/PAGE_SIZE);
2665 + if (IS_ERR(page)) {
2666 + ret = PTR_ERR(page);
2670 + limit = blkoff & ~(PAGE_SIZE-1);
2672 + dbuf = page_address(page);
2674 + /* deal with the individual blocks stashed on this page */
2676 + dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) / sizeof(afs_dir_block_t)];
2677 + ret = afs_dir_iterate_block(fpos,dblock,blkoff,cookie,filldir);
2679 + afs_dir_put_page(page);
2683 + blkoff += sizeof(afs_dir_block_t);
2685 + } while (*fpos < dir->i_size && blkoff < limit);
2687 + afs_dir_put_page(page);
2692 + _leave(" = %d",ret);
2694 +} /* end afs_dir_iterate() */
2696 +/*****************************************************************************/
2698 + * read an AFS directory
2700 +static int afs_dir_readdir(struct file *file, void *cookie, filldir_t filldir)
2705 + _enter("{%Ld,{%lu}}",file->f_pos,file->f_dentry->d_inode->i_ino);
2707 + fpos = file->f_pos;
2708 + ret = afs_dir_iterate(file->f_dentry->d_inode,&fpos,cookie,filldir);
2709 + file->f_pos = fpos;
2711 + _leave(" = %d",ret);
2713 +} /* end afs_dir_readdir() */
2715 +/*****************************************************************************/
2717 + * search the directory for a name
2718 + * - if afs_dir_iterate_block() spots this function, it'll pass the FID uniquifier through dtype
2720 +static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, loff_t fpos,
2721 + ino_t ino, unsigned dtype)
2723 + struct afs_dir_lookup_cookie *cookie = _cookie;
2725 + _enter("{%s,%u},%s,%u,,%lu,%u",cookie->name,cookie->nlen,name,nlen,ino,ntohl(dtype));
2727 + if (cookie->nlen != nlen || memcmp(cookie->name,name,nlen)!=0) {
2728 + _leave(" = 0 [no]");
2732 + cookie->fid.vnode = ino;
2733 + cookie->fid.unique = ntohl(dtype);
2734 + cookie->found = 1;
2736 + _leave(" = -1 [found]");
2738 +} /* end afs_dir_lookup_filldir() */
2740 +/*****************************************************************************/
2742 + * look up an entry in a directory
2744 +static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry)
2746 + struct afs_dir_lookup_cookie cookie;
2747 + struct afs_super_info *as;
2748 + struct inode *inode;
2749 + afs_vnode_t *vnode;
2753 + _enter("{%lu},{%s}",dir->i_ino,dentry->d_name.name);
2755 + /* insanity checks first */
2756 + if (sizeof(afs_dir_block_t) != 2048) BUG();
2757 + if (sizeof(afs_dirent_t) != 32) BUG();
2759 + if (dentry->d_name.len > 255) {
2760 + _leave(" = -ENAMETOOLONG");
2761 + return ERR_PTR(-ENAMETOOLONG);
2764 + vnode = AFS_FS_I(dir);
2765 + if (vnode->flags & AFS_VNODE_DELETED) {
2766 + _leave(" = -ESTALE");
2767 + return ERR_PTR(-ESTALE);
2770 + as = dir->i_sb->u.generic_sbp;
2772 + /* search the directory */
2773 + cookie.name = dentry->d_name.name;
2774 + cookie.nlen = dentry->d_name.len;
2775 + cookie.fid.vid = as->volume->vid;
2779 + ret = afs_dir_iterate(dir,&fpos,&cookie,afs_dir_lookup_filldir);
2781 + _leave(" = %d",ret);
2782 + return ERR_PTR(ret);
2786 + if (!cookie.found) {
2787 + _leave(" = %d",ret);
2788 + return ERR_PTR(ret);
2791 + /* instantiate the dentry */
2792 + ret = afs_iget(dir->i_sb,&cookie.fid,&inode);
2794 + _leave(" = %d",ret);
2795 + return ERR_PTR(ret);
2798 + dentry->d_op = &afs_fs_dentry_operations;
2799 + dentry->d_fsdata = (void*) (unsigned) vnode->status.version;
2801 + d_add(dentry,inode);
2802 + _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%lu }",
2804 + cookie.fid.unique,
2805 + dentry->d_inode->i_ino,
2806 + dentry->d_inode->i_version);
2809 +} /* end afs_dir_lookup() */
2811 +/*****************************************************************************/
2813 + * check that a dentry lookup hit has found a valid entry
2814 + * - NOTE! the hit can be a negative hit too, so we can't assume we have an inode
2815 + * (derived from nfs_lookup_revalidate)
2817 +static int afs_d_revalidate(struct dentry *dentry, int flags)
2819 + struct afs_dir_lookup_cookie cookie;
2820 + struct dentry *parent;
2821 + struct inode *inode, *dir;
2825 + _enter("%s,%x",dentry->d_name.name,flags);
2827 + /* lock down the parent dentry so we can peer at it */
2828 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
2829 + read_lock(&dparent_lock);
2830 + parent = dget(dentry->d_parent);
2831 + read_unlock(&dparent_lock);
2834 + parent = dget(dentry->d_parent);
2838 + dir = parent->d_inode;
2839 + inode = dentry->d_inode;
2841 + /* handle a negative inode */
2845 + /* handle a bad inode */
2846 + if (is_bad_inode(inode)) {
2847 + printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n",
2848 + dentry->d_parent->d_name.name,dentry->d_name.name);
2852 + /* force a full lookup if the parent directory changed since the server was last consulted
2853 + * - otherwise this inode must still exist, even if the inode details themselves have
2856 + if (AFS_FS_I(dir)->flags & AFS_VNODE_CHANGED)
2857 + afs_vnode_fetch_status(AFS_FS_I(dir));
2859 + if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
2860 + _debug("%s: parent dir deleted",dentry->d_name.name);
2864 + if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) {
2865 + _debug("%s: file already deleted",dentry->d_name.name);
2869 + if ((unsigned)dentry->d_fsdata != (unsigned)AFS_FS_I(dir)->status.version) {
2870 + _debug("%s: parent changed %u -> %u",
2871 + dentry->d_name.name,
2872 + (unsigned)dentry->d_fsdata,
2873 + (unsigned)AFS_FS_I(dir)->status.version);
2875 + /* search the directory for this vnode */
2876 + cookie.name = dentry->d_name.name;
2877 + cookie.nlen = dentry->d_name.len;
2878 + cookie.fid.vid = AFS_FS_I(inode)->volume->vid;
2882 + ret = afs_dir_iterate(dir,&fpos,&cookie,afs_dir_lookup_filldir);
2884 + _debug("failed to iterate dir %s: %d",parent->d_name.name,ret);
2888 + if (!cookie.found) {
2889 + _debug("%s: dirent not found",dentry->d_name.name);
2893 + /* if the vnode ID has changed, then the dirent points to a different file */
2894 + if (cookie.fid.vnode!=AFS_FS_I(inode)->fid.vnode) {
2895 + _debug("%s: dirent changed",dentry->d_name.name);
2899 + /* if the vnode ID uniquifier has changed, then the file has been deleted */
2900 + if (cookie.fid.unique!=AFS_FS_I(inode)->fid.unique) {
2901 + _debug("%s: file deleted (uq %u -> %u I:%lu)",
2902 + dentry->d_name.name,
2903 + cookie.fid.unique,
2904 + AFS_FS_I(inode)->fid.unique,
2905 + inode->i_version);
2906 + spin_lock(&AFS_FS_I(inode)->lock);
2907 + AFS_FS_I(inode)->flags |= AFS_VNODE_DELETED;
2908 + spin_unlock(&AFS_FS_I(inode)->lock);
2909 + invalidate_inode_pages(inode);
2913 + dentry->d_fsdata = (void*) (unsigned) AFS_FS_I(dir)->status.version;
2918 + _leave(" = 1 [valid]");
2921 + /* the dirent, if it exists, now points to a different vnode */
2923 + dentry->d_flags |= DCACHE_NFSFS_RENAMED;
2927 + /* don't unhash if we have submounts */
2928 + if (have_submounts(dentry))
2932 + shrink_dcache_parent(dentry);
2934 + _debug("dropping dentry %s/%s",dentry->d_parent->d_name.name,dentry->d_name.name);
2939 + _leave(" = 0 [bad]");
2941 +} /* end afs_d_revalidate() */
2943 +/*****************************************************************************/
2945 + * allow the VFS to enquire as to whether a dentry should be unhashed (mustn't sleep)
2946 + * - called from dput() when d_count is going to 0.
2947 + * - return 1 to request dentry be unhashed, 0 otherwise
2949 +static int afs_d_delete(struct dentry *dentry)
2951 + _enter("%s",dentry->d_name.name);
2953 + if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
2956 + if (dentry->d_inode) {
2957 + if (AFS_FS_I(dentry->d_inode)->flags & AFS_VNODE_DELETED)
2961 + _leave(" = 0 [keep]");
2965 + _leave(" = 1 [zap]");
2967 +} /* end afs_d_delete() */
2968 diff -urNp linux-5240/fs/afs/errors.h linux-5250/fs/afs/errors.h
2969 --- linux-5240/fs/afs/errors.h 1970-01-01 01:00:00.000000000 +0100
2970 +++ linux-5250/fs/afs/errors.h
2972 +/* errors.h: AFS abort/error codes
2974 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
2975 + * Written by David Howells (dhowells@redhat.com)
2977 + * This program is free software; you can redistribute it and/or
2978 + * modify it under the terms of the GNU General Public License
2979 + * as published by the Free Software Foundation; either version
2980 + * 2 of the License, or (at your option) any later version.
2983 +#ifndef _H_DB712916_5113_11D6_9A6D_0002B3163499
2984 +#define _H_DB712916_5113_11D6_9A6D_0002B3163499
2988 +/* file server abort codes */
2990 + VSALVAGE = 101, /* volume needs salvaging */
2991 + VNOVNODE = 102, /* no such file/dir (vnode) */
2992 + VNOVOL = 103, /* no such volume or volume unavailable */
2993 + VVOLEXISTS = 104, /* volume name already exists */
2994 + VNOSERVICE = 105, /* volume not currently in service */
2995 + VOFFLINE = 106, /* volume is currently offline (more info available [VVL-spec]) */
2996 + VONLINE = 107, /* volume is already online */
2997 + VDISKFULL = 108, /* disk partition is full */
2998 + VOVERQUOTA = 109, /* volume's maximum quota exceeded */
2999 + VBUSY = 110, /* volume is temporarily unavailable */
3000 + VMOVED = 111, /* volume moved to new server - ask this FS where */
3001 +} afs_rxfs_abort_t;
3003 +extern int afs_abort_to_error(int abortcode);
3005 +#endif /* _H_DB712916_5113_11D6_9A6D_0002B3163499 */
3006 diff -urNp linux-5240/fs/afs/file.c linux-5250/fs/afs/file.c
3007 --- linux-5240/fs/afs/file.c 1970-01-01 01:00:00.000000000 +0100
3008 +++ linux-5250/fs/afs/file.c
3010 +/* file.c: AFS filesystem file handling
3012 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
3013 + * Written by David Howells (dhowells@redhat.com)
3015 + * This program is free software; you can redistribute it and/or
3016 + * modify it under the terms of the GNU General Public License
3017 + * as published by the Free Software Foundation; either version
3018 + * 2 of the License, or (at your option) any later version.
3021 +#include <linux/kernel.h>
3022 +#include <linux/module.h>
3023 +#include <linux/init.h>
3024 +#include <linux/sched.h>
3025 +#include <linux/slab.h>
3026 +#include <linux/fs.h>
3027 +#include <linux/pagemap.h>
3028 +#include "volume.h"
3030 +#include <rxrpc/call.h>
3031 +#include "internal.h"
3033 +//static int afs_file_open(struct inode *inode, struct file *file);
3034 +//static int afs_file_release(struct inode *inode, struct file *file);
3036 +static int afs_file_readpage(struct file *file, struct page *page);
3038 +//static ssize_t afs_file_read(struct file *file, char *buf, size_t size, loff_t *off);
3040 +static ssize_t afs_file_write(struct file *file, const char *buf, size_t size, loff_t *off);
3042 +struct inode_operations afs_file_inode_operations = {
3043 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
3044 + getattr: afs_inode_getattr,
3046 + revalidate: afs_inode_revalidate,
3050 +struct file_operations afs_file_file_operations = {
3051 +// open: afs_file_open,
3052 +// release: afs_file_release,
3053 + read: generic_file_read, //afs_file_read,
3054 + write: afs_file_write,
3055 + mmap: generic_file_mmap,
3056 +// fsync: afs_file_fsync,
3059 +struct address_space_operations afs_fs_aops = {
3060 + readpage: afs_file_readpage,
3063 +/*****************************************************************************/
3068 +static ssize_t afs_file_read(struct file *file, char *buf, size_t size, loff_t *off)
3070 + struct afs_inode_info *ai;
3072 + ai = AFS_FS_I(file->f_dentry->d_inode);
3073 + if (ai->flags & AFS_INODE_DELETED)
3077 +} /* end afs_file_read() */
3080 +/*****************************************************************************/
3084 +static ssize_t afs_file_write(struct file *file, const char *buf, size_t size, loff_t *off)
3086 + afs_vnode_t *vnode;
3088 + vnode = AFS_FS_I(file->f_dentry->d_inode);
3089 + if (vnode->flags & AFS_VNODE_DELETED)
3093 +} /* end afs_file_write() */
3095 +/*****************************************************************************/
3097 + * AFS read page from file (or symlink)
3099 +static int afs_file_readpage(struct file *file, struct page *page)
3101 + struct afs_rxfs_fetch_descriptor desc;
3102 + struct inode *inode;
3103 + afs_vnode_t *vnode;
3106 + inode = page->mapping->host;
3108 + _enter("{%lu},{%lu}",inode->i_ino,page->index);
3110 + vnode = AFS_FS_I(inode);
3112 + if (!PageLocked(page))
3116 + if (vnode->flags & AFS_VNODE_DELETED)
3119 + /* work out how much to get and from where */
3120 + desc.fid = vnode->fid;
3121 + desc.offset = page->index << PAGE_CACHE_SHIFT;
3122 + desc.size = min((size_t)(inode->i_size - desc.offset),(size_t)PAGE_SIZE);
3123 + desc.buffer = kmap(page);
3125 + clear_page(desc.buffer);
3127 + /* read the contents of the file from the server into the page */
3128 + ret = afs_vnode_fetch_data(vnode,&desc);
3131 + if (ret==-ENOENT) {
3132 + _debug("got NOENT from server - marking file deleted and stale");
3133 + vnode->flags |= AFS_VNODE_DELETED;
3139 + SetPageUptodate(page);
3140 + unlock_page(page);
3146 + SetPageError(page);
3147 + unlock_page(page);
3149 + _leave(" = %d",ret);
3152 +} /* end afs_file_readpage() */
3153 diff -urNp linux-5240/fs/afs/fsclient.c linux-5250/fs/afs/fsclient.c
3154 --- linux-5240/fs/afs/fsclient.c 1970-01-01 01:00:00.000000000 +0100
3155 +++ linux-5250/fs/afs/fsclient.c
3157 +/* fsclient.c: AFS File Server client stubs
3159 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
3160 + * Written by David Howells (dhowells@redhat.com)
3162 + * This program is free software; you can redistribute it and/or
3163 + * modify it under the terms of the GNU General Public License
3164 + * as published by the Free Software Foundation; either version
3165 + * 2 of the License, or (at your option) any later version.
3168 +#include <linux/init.h>
3169 +#include <linux/sched.h>
3170 +#include <rxrpc/rxrpc.h>
3171 +#include <rxrpc/transport.h>
3172 +#include <rxrpc/connection.h>
3173 +#include <rxrpc/call.h>
3174 +#include "fsclient.h"
3175 +#include "cmservice.h"
3177 +#include "server.h"
3178 +#include "errors.h"
3179 +#include "internal.h"
3181 +#define FSFETCHSTATUS 132 /* AFS Fetch file status */
3182 +#define FSFETCHDATA 130 /* AFS Fetch file data */
3183 +#define FSGIVEUPCALLBACKS 147 /* AFS Discard server callback promises */
3184 +#define FSGETVOLUMEINFO 148 /* AFS Get root volume information */
3185 +#define FSGETROOTVOLUME 151 /* AFS Get root volume name */
3186 +#define FSLOOKUP 161 /* AFS lookup file in directory */
3188 +/*****************************************************************************/
3190 + * map afs abort codes to/from Linux error codes
3191 + * - called with call->lock held
3193 +static void afs_rxfs_aemap(struct rxrpc_call *call)
3195 + switch (call->app_err_state) {
3196 + case RXRPC_ESTATE_LOCAL_ABORT:
3197 + call->app_abort_code = -call->app_errno;
3199 + case RXRPC_ESTATE_PEER_ABORT:
3200 + call->app_errno = afs_abort_to_error(call->app_abort_code);
3205 +} /* end afs_rxfs_aemap() */
3207 +/*****************************************************************************/
3209 + * get the root volume name from a fileserver
3210 + * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
3213 +int afs_rxfs_get_root_volume(afs_server_t *server, char *buf, size_t *buflen)
3215 + DECLARE_WAITQUEUE(myself,current);
3217 + struct rxrpc_connection *conn;
3218 + struct rxrpc_call *call;
3219 + struct iovec piov[2];
3224 + kenter("%p,%p,%u",server,buf,*buflen);
3226 + /* get hold of the fileserver connection */
3227 + ret = afs_server_get_fsconn(server,&conn);
3231 + /* create a call through that connection */
3232 + ret = rxrpc_create_call(conn,NULL,NULL,afs_rxfs_aemap,&call);
3234 + printk("kAFS: Unable to create call: %d\n",ret);
3235 + goto out_put_conn;
3237 + call->app_opcode = FSGETROOTVOLUME;
3239 + /* we want to get event notifications from the call */
3240 + add_wait_queue(&call->waitq,&myself);
3242 + /* marshall the parameters */
3243 + param[0] = htonl(FSGETROOTVOLUME);
3245 + piov[0].iov_len = sizeof(param);
3246 + piov[0].iov_base = param;
3248 + /* send the parameters to the server */
3249 + ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3253 + /* wait for the reply to completely arrive */
3255 + set_current_state(TASK_INTERRUPTIBLE);
3256 + if (call->app_call_state!=RXRPC_CSTATE_CLNT_RCV_REPLY ||
3257 + signal_pending(current))
3261 + set_current_state(TASK_RUNNING);
3264 + if (signal_pending(current))
3267 + switch (call->app_call_state) {
3268 + case RXRPC_CSTATE_ERROR:
3269 + ret = call->app_errno;
3270 + kdebug("Got Error: %d",ret);
3273 + case RXRPC_CSTATE_CLNT_GOT_REPLY:
3274 + /* read the reply */
3275 + kdebug("Got Reply: qty=%d",call->app_ready_qty);
3278 + if (call->app_ready_qty <= 4)
3281 + ret = rxrpc_call_read_data(call,NULL,call->app_ready_qty,0);
3286 + /* unmarshall the reply */
3288 + for (loop=0; loop<65; loop++)
3289 + entry->name[loop] = ntohl(*bp++);
3290 + entry->name[64] = 0;
3292 + entry->type = ntohl(*bp++);
3293 + entry->num_servers = ntohl(*bp++);
3295 + for (loop=0; loop<8; loop++)
3296 + entry->servers[loop].addr.s_addr = *bp++;
3298 + for (loop=0; loop<8; loop++)
3299 + entry->servers[loop].partition = ntohl(*bp++);
3301 + for (loop=0; loop<8; loop++)
3302 + entry->servers[loop].flags = ntohl(*bp++);
3304 + for (loop=0; loop<3; loop++)
3305 + entry->volume_ids[loop] = ntohl(*bp++);
3307 + entry->clone_id = ntohl(*bp++);
3308 + entry->flags = ntohl(*bp);
3320 + set_current_state(TASK_UNINTERRUPTIBLE);
3321 + rxrpc_call_abort(call,ret);
3324 + set_current_state(TASK_RUNNING);
3325 + remove_wait_queue(&call->waitq,&myself);
3326 + rxrpc_put_call(call);
3328 + afs_server_release_fsconn(server,conn);
3332 +} /* end afs_rxfs_get_root_volume() */
3335 +/*****************************************************************************/
3337 + * get information about a volume
3340 +int afs_rxfs_get_volume_info(afs_server_t *server,
3342 + afs_volume_info_t *vinfo)
3344 + DECLARE_WAITQUEUE(myself,current);
3346 + struct rxrpc_connection *conn;
3347 + struct rxrpc_call *call;
3348 + struct iovec piov[3];
3351 + u32 param[2], *bp, zero;
3353 + _enter("%p,%s,%p",server,name,vinfo);
3355 + /* get hold of the fileserver connection */
3356 + ret = afs_server_get_fsconn(server,&conn);
3360 + /* create a call through that connection */
3361 + ret = rxrpc_create_call(conn,NULL,NULL,afs_rxfs_aemap,&call);
3363 + printk("kAFS: Unable to create call: %d\n",ret);
3364 + goto out_put_conn;
3366 + call->app_opcode = FSGETVOLUMEINFO;
3368 + /* we want to get event notifications from the call */
3369 + add_wait_queue(&call->waitq,&myself);
3371 + /* marshall the parameters */
3372 + piov[1].iov_len = strlen(name);
3373 + piov[1].iov_base = (char*)name;
3376 + piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
3377 + piov[2].iov_base = &zero;
3379 + param[0] = htonl(FSGETVOLUMEINFO);
3380 + param[1] = htonl(piov[1].iov_len);
3382 + piov[0].iov_len = sizeof(param);
3383 + piov[0].iov_base = param;
3385 + /* send the parameters to the server */
3386 + ret = rxrpc_call_write_data(call,3,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3390 + /* wait for the reply to completely arrive */
3391 + bp = rxrpc_call_alloc_scratch(call,64);
3393 + ret = rxrpc_call_read_data(call,bp,64,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
3395 + if (ret==-ECONNABORTED) {
3396 + ret = call->app_errno;
3402 + /* unmarshall the reply */
3403 + vinfo->vid = ntohl(*bp++);
3404 + vinfo->type = ntohl(*bp++);
3406 + vinfo->type_vids[0] = ntohl(*bp++);
3407 + vinfo->type_vids[1] = ntohl(*bp++);
3408 + vinfo->type_vids[2] = ntohl(*bp++);
3409 + vinfo->type_vids[3] = ntohl(*bp++);
3410 + vinfo->type_vids[4] = ntohl(*bp++);
3412 + vinfo->nservers = ntohl(*bp++);
3413 + vinfo->servers[0].addr.s_addr = *bp++;
3414 + vinfo->servers[1].addr.s_addr = *bp++;
3415 + vinfo->servers[2].addr.s_addr = *bp++;
3416 + vinfo->servers[3].addr.s_addr = *bp++;
3417 + vinfo->servers[4].addr.s_addr = *bp++;
3418 + vinfo->servers[5].addr.s_addr = *bp++;
3419 + vinfo->servers[6].addr.s_addr = *bp++;
3420 + vinfo->servers[7].addr.s_addr = *bp++;
3423 + if (vinfo->nservers>8)
3430 + set_current_state(TASK_RUNNING);
3431 + remove_wait_queue(&call->waitq,&myself);
3432 + rxrpc_put_call(call);
3434 + afs_server_release_fsconn(server,conn);
3440 + set_current_state(TASK_UNINTERRUPTIBLE);
3441 + rxrpc_call_abort(call,ret);
3445 +} /* end afs_rxfs_get_volume_info() */
3448 +/*****************************************************************************/
3450 + * fetch the status information for a file
3452 +int afs_rxfs_fetch_file_status(afs_server_t *server,
3453 + afs_vnode_t *vnode,
3454 + afs_volsync_t *volsync)
3456 + DECLARE_WAITQUEUE(myself,current);
3458 + struct afs_server_callslot callslot;
3459 + struct rxrpc_call *call;
3460 + struct iovec piov[1];
3465 + _enter("%p,{%u,%u,%u}",server,vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique);
3467 + /* get hold of the fileserver connection */
3468 + ret = afs_server_request_callslot(server,&callslot);
3472 + /* create a call through that connection */
3473 + ret = rxrpc_create_call(callslot.conn,NULL,NULL,afs_rxfs_aemap,&call);
3475 + printk("kAFS: Unable to create call: %d\n",ret);
3476 + goto out_put_conn;
3478 + call->app_opcode = FSFETCHSTATUS;
3480 + /* we want to get event notifications from the call */
3481 + add_wait_queue(&call->waitq,&myself);
3483 + /* marshall the parameters */
3484 + bp = rxrpc_call_alloc_scratch(call,16);
3485 + bp[0] = htonl(FSFETCHSTATUS);
3486 + bp[1] = htonl(vnode->fid.vid);
3487 + bp[2] = htonl(vnode->fid.vnode);
3488 + bp[3] = htonl(vnode->fid.unique);
3490 + piov[0].iov_len = 16;
3491 + piov[0].iov_base = bp;
3493 + /* send the parameters to the server */
3494 + ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3498 + /* wait for the reply to completely arrive */
3499 + bp = rxrpc_call_alloc_scratch(call,120);
3501 + ret = rxrpc_call_read_data(call,bp,120,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
3503 + if (ret==-ECONNABORTED) {
3504 + ret = call->app_errno;
3510 + /* unmarshall the reply */
3511 + vnode->status.if_version = ntohl(*bp++);
3512 + vnode->status.type = ntohl(*bp++);
3513 + vnode->status.nlink = ntohl(*bp++);
3514 + vnode->status.size = ntohl(*bp++);
3515 + vnode->status.version = ntohl(*bp++);
3516 + vnode->status.author = ntohl(*bp++);
3517 + vnode->status.owner = ntohl(*bp++);
3518 + vnode->status.caller_access = ntohl(*bp++);
3519 + vnode->status.anon_access = ntohl(*bp++);
3520 + vnode->status.mode = ntohl(*bp++);
3521 + vnode->status.parent.vid = vnode->fid.vid;
3522 + vnode->status.parent.vnode = ntohl(*bp++);
3523 + vnode->status.parent.unique = ntohl(*bp++);
3524 + bp++; /* seg size */
3525 + vnode->status.mtime_client = ntohl(*bp++);
3526 + vnode->status.mtime_server = ntohl(*bp++);
3528 + bp++; /* sync counter */
3529 + vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
3530 + bp++; /* spare2 */
3531 + bp++; /* spare3 */
3532 + bp++; /* spare4 */
3534 + vnode->cb_version = ntohl(*bp++);
3535 + vnode->cb_expiry = ntohl(*bp++);
3536 + vnode->cb_type = ntohl(*bp++);
3539 + volsync->creation = ntohl(*bp++);
3540 + bp++; /* spare2 */
3541 + bp++; /* spare3 */
3542 + bp++; /* spare4 */
3543 + bp++; /* spare5 */
3544 + bp++; /* spare6 */
3551 + set_current_state(TASK_RUNNING);
3552 + remove_wait_queue(&call->waitq,&myself);
3553 + rxrpc_put_call(call);
3555 + afs_server_release_callslot(server,&callslot);
3561 + set_current_state(TASK_UNINTERRUPTIBLE);
3562 + rxrpc_call_abort(call,ret);
3565 +} /* end afs_rxfs_fetch_file_status() */
3567 +/*****************************************************************************/
3569 + * fetch the contents of a file or directory
3571 +int afs_rxfs_fetch_file_data(afs_server_t *server,
3572 + afs_vnode_t *vnode,
3573 + struct afs_rxfs_fetch_descriptor *desc,
3574 + afs_volsync_t *volsync)
3576 + DECLARE_WAITQUEUE(myself,current);
3578 + struct afs_server_callslot callslot;
3579 + struct rxrpc_call *call;
3580 + struct iovec piov[1];
3585 + _enter("%p,{fid={%u,%u,%u},sz=%u,of=%lu}",
3593 + /* get hold of the fileserver connection */
3594 + ret = afs_server_request_callslot(server,&callslot);
3598 + /* create a call through that connection */
3599 + ret = rxrpc_create_call(callslot.conn,NULL,NULL,afs_rxfs_aemap,&call);
3601 + printk("kAFS: Unable to create call: %d\n",ret);
3602 + goto out_put_conn;
3604 + call->app_opcode = FSFETCHDATA;
3606 + /* we want to get event notifications from the call */
3607 + add_wait_queue(&call->waitq,&myself);
3609 + /* marshall the parameters */
3610 + bp = rxrpc_call_alloc_scratch(call,24);
3611 + bp[0] = htonl(FSFETCHDATA);
3612 + bp[1] = htonl(desc->fid.vid);
3613 + bp[2] = htonl(desc->fid.vnode);
3614 + bp[3] = htonl(desc->fid.unique);
3615 + bp[4] = htonl(desc->offset);
3616 + bp[5] = htonl(desc->size);
3618 + piov[0].iov_len = 24;
3619 + piov[0].iov_base = bp;
3621 + /* send the parameters to the server */
3622 + ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3626 + /* wait for the data count to arrive */
3627 + ret = rxrpc_call_read_data(call,bp,4,RXRPC_CALL_READ_BLOCK);
3631 + desc->actual = ntohl(bp[0]);
3632 + if (desc->actual!=desc->size) {
3637 + /* call the app to read the actual data */
3638 + rxrpc_call_reset_scratch(call);
3640 + ret = rxrpc_call_read_data(call,desc->buffer,desc->actual,RXRPC_CALL_READ_BLOCK);
3644 + /* wait for the rest of the reply to completely arrive */
3645 + rxrpc_call_reset_scratch(call);
3646 + bp = rxrpc_call_alloc_scratch(call,120);
3648 + ret = rxrpc_call_read_data(call,bp,120,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
3652 + /* unmarshall the reply */
3653 + vnode->status.if_version = ntohl(*bp++);
3654 + vnode->status.type = ntohl(*bp++);
3655 + vnode->status.nlink = ntohl(*bp++);
3656 + vnode->status.size = ntohl(*bp++);
3657 + vnode->status.version = ntohl(*bp++);
3658 + vnode->status.author = ntohl(*bp++);
3659 + vnode->status.owner = ntohl(*bp++);
3660 + vnode->status.caller_access = ntohl(*bp++);
3661 + vnode->status.anon_access = ntohl(*bp++);
3662 + vnode->status.mode = ntohl(*bp++);
3663 + vnode->status.parent.vid = desc->fid.vid;
3664 + vnode->status.parent.vnode = ntohl(*bp++);
3665 + vnode->status.parent.unique = ntohl(*bp++);
3666 + bp++; /* seg size */
3667 + vnode->status.mtime_client = ntohl(*bp++);
3668 + vnode->status.mtime_server = ntohl(*bp++);
3670 + bp++; /* sync counter */
3671 + vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
3672 + bp++; /* spare2 */
3673 + bp++; /* spare3 */
3674 + bp++; /* spare4 */
3676 + vnode->cb_version = ntohl(*bp++);
3677 + vnode->cb_expiry = ntohl(*bp++);
3678 + vnode->cb_type = ntohl(*bp++);
3681 + volsync->creation = ntohl(*bp++);
3682 + bp++; /* spare2 */
3683 + bp++; /* spare3 */
3684 + bp++; /* spare4 */
3685 + bp++; /* spare5 */
3686 + bp++; /* spare6 */
3693 + set_current_state(TASK_RUNNING);
3694 + remove_wait_queue(&call->waitq,&myself);
3695 + rxrpc_put_call(call);
3697 + afs_server_release_callslot(server,&callslot);
3699 + _leave(" = %d",ret);
3703 + if (ret==-ECONNABORTED) {
3704 + ret = call->app_errno;
3709 + set_current_state(TASK_UNINTERRUPTIBLE);
3710 + rxrpc_call_abort(call,ret);
3714 +} /* end afs_rxfs_fetch_file_data() */
3716 +/*****************************************************************************/
3718 + * ask the AFS fileserver to discard a callback request on a file
3720 +int afs_rxfs_give_up_callback(afs_server_t *server, afs_vnode_t *vnode)
3722 + DECLARE_WAITQUEUE(myself,current);
3724 + struct afs_server_callslot callslot;
3725 + struct rxrpc_call *call;
3726 + struct iovec piov[1];
3731 + _enter("%p,{%u,%u,%u}",server,vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique);
3733 + /* get hold of the fileserver connection */
3734 + ret = afs_server_request_callslot(server,&callslot);
3738 + /* create a call through that connection */
3739 + ret = rxrpc_create_call(callslot.conn,NULL,NULL,afs_rxfs_aemap,&call);
3741 + printk("kAFS: Unable to create call: %d\n",ret);
3742 + goto out_put_conn;
3744 + call->app_opcode = FSGIVEUPCALLBACKS;
3746 + /* we want to get event notifications from the call */
3747 + add_wait_queue(&call->waitq,&myself);
3749 + /* marshall the parameters */
3750 + bp = rxrpc_call_alloc_scratch(call,(1+4+4)*4);
3752 + piov[0].iov_len = (1+4+4)*4;
3753 + piov[0].iov_base = bp;
3755 + *bp++ = htonl(FSGIVEUPCALLBACKS);
3757 + *bp++ = htonl(vnode->fid.vid);
3758 + *bp++ = htonl(vnode->fid.vnode);
3759 + *bp++ = htonl(vnode->fid.unique);
3761 + *bp++ = htonl(vnode->cb_version);
3762 + *bp++ = htonl(vnode->cb_expiry);
3763 + *bp++ = htonl(vnode->cb_type);
3765 + /* send the parameters to the server */
3766 + ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3770 + /* wait for the reply to completely arrive */
3772 + set_current_state(TASK_INTERRUPTIBLE);
3773 + if (call->app_call_state!=RXRPC_CSTATE_CLNT_RCV_REPLY ||
3774 + signal_pending(current))
3778 + set_current_state(TASK_RUNNING);
3781 + if (signal_pending(current))
3784 + switch (call->app_call_state) {
3785 + case RXRPC_CSTATE_ERROR:
3786 + ret = call->app_errno;
3789 + case RXRPC_CSTATE_CLNT_GOT_REPLY:
3798 + set_current_state(TASK_RUNNING);
3799 + remove_wait_queue(&call->waitq,&myself);
3800 + rxrpc_put_call(call);
3802 + afs_server_release_callslot(server,&callslot);
3808 + set_current_state(TASK_UNINTERRUPTIBLE);
3809 + rxrpc_call_abort(call,ret);
3812 +} /* end afs_rxfs_give_up_callback() */
3814 +/*****************************************************************************/
3816 + * look a filename up in a directory
3817 + * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
3820 +int afs_rxfs_lookup(afs_server_t *server,
3822 + const char *filename,
3823 + afs_vnode_t *vnode,
3824 + afs_volsync_t *volsync)
3826 + DECLARE_WAITQUEUE(myself,current);
3828 + struct rxrpc_connection *conn;
3829 + struct rxrpc_call *call;
3830 + struct iovec piov[3];
3835 + kenter("%p,{%u,%u,%u},%s",server,fid->vid,fid->vnode,fid->unique,filename);
3837 + /* get hold of the fileserver connection */
3838 + ret = afs_server_get_fsconn(server,&conn);
3842 + /* create a call through that connection */
3843 + ret = rxrpc_create_call(conn,NULL,NULL,afs_rxfs_aemap,&call);
3845 + printk("kAFS: Unable to create call: %d\n",ret);
3846 + goto out_put_conn;
3848 + call->app_opcode = FSLOOKUP;
3850 + /* we want to get event notifications from the call */
3851 + add_wait_queue(&call->waitq,&myself);
3853 + /* marshall the parameters */
3854 + bp = rxrpc_call_alloc_scratch(call,20);
3858 + piov[0].iov_len = 20;
3859 + piov[0].iov_base = bp;
3860 + piov[1].iov_len = strlen(filename);
3861 + piov[1].iov_base = (char*) filename;
3862 + piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
3863 + piov[2].iov_base = &zero;
3865 + *bp++ = htonl(FSLOOKUP);
3866 + *bp++ = htonl(dirfid->vid);
3867 + *bp++ = htonl(dirfid->vnode);
3868 + *bp++ = htonl(dirfid->unique);
3869 + *bp++ = htonl(piov[1].iov_len);
3871 + /* send the parameters to the server */
3872 + ret = rxrpc_call_write_data(call,3,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
3876 + /* wait for the reply to completely arrive */
3877 + bp = rxrpc_call_alloc_scratch(call,220);
3879 + ret = rxrpc_call_read_data(call,bp,220,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
3881 + if (ret==-ECONNABORTED) {
3882 + ret = call->app_errno;
3888 + /* unmarshall the reply */
3889 + fid->vid = ntohl(*bp++);
3890 + fid->vnode = ntohl(*bp++);
3891 + fid->unique = ntohl(*bp++);
3893 + vnode->status.if_version = ntohl(*bp++);
3894 + vnode->status.type = ntohl(*bp++);
3895 + vnode->status.nlink = ntohl(*bp++);
3896 + vnode->status.size = ntohl(*bp++);
3897 + vnode->status.version = ntohl(*bp++);
3898 + vnode->status.author = ntohl(*bp++);
3899 + vnode->status.owner = ntohl(*bp++);
3900 + vnode->status.caller_access = ntohl(*bp++);
3901 + vnode->status.anon_access = ntohl(*bp++);
3902 + vnode->status.mode = ntohl(*bp++);
3903 + vnode->status.parent.vid = dirfid->vid;
3904 + vnode->status.parent.vnode = ntohl(*bp++);
3905 + vnode->status.parent.unique = ntohl(*bp++);
3906 + bp++; /* seg size */
3907 + vnode->status.mtime_client = ntohl(*bp++);
3908 + vnode->status.mtime_server = ntohl(*bp++);
3910 + bp++; /* sync counter */
3911 + vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
3912 + bp++; /* spare2 */
3913 + bp++; /* spare3 */
3914 + bp++; /* spare4 */
3916 + dir->status.if_version = ntohl(*bp++);
3917 + dir->status.type = ntohl(*bp++);
3918 + dir->status.nlink = ntohl(*bp++);
3919 + dir->status.size = ntohl(*bp++);
3920 + dir->status.version = ntohl(*bp++);
3921 + dir->status.author = ntohl(*bp++);
3922 + dir->status.owner = ntohl(*bp++);
3923 + dir->status.caller_access = ntohl(*bp++);
3924 + dir->status.anon_access = ntohl(*bp++);
3925 + dir->status.mode = ntohl(*bp++);
3926 + dir->status.parent.vid = dirfid->vid;
3927 + dir->status.parent.vnode = ntohl(*bp++);
3928 + dir->status.parent.unique = ntohl(*bp++);
3929 + bp++; /* seg size */
3930 + dir->status.mtime_client = ntohl(*bp++);
3931 + dir->status.mtime_server = ntohl(*bp++);
3933 + bp++; /* sync counter */
3934 + dir->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
3935 + bp++; /* spare2 */
3936 + bp++; /* spare3 */
3937 + bp++; /* spare4 */
3939 + callback->fid = *fid;
3940 + callback->version = ntohl(*bp++);
3941 + callback->expiry = ntohl(*bp++);
3942 + callback->type = ntohl(*bp++);
3945 + volsync->creation = ntohl(*bp++);
3946 + bp++; /* spare2 */
3947 + bp++; /* spare3 */
3948 + bp++; /* spare4 */
3949 + bp++; /* spare5 */
3950 + bp++; /* spare6 */
3957 + set_current_state(TASK_RUNNING);
3958 + remove_wait_queue(&call->waitq,&myself);
3959 + rxrpc_put_call(call);
3961 + afs_server_release_fsconn(server,conn);
3967 + set_current_state(TASK_UNINTERRUPTIBLE);
3968 + rxrpc_call_abort(call,ret);
3971 +} /* end afs_rxfs_lookup() */
3973 diff -urNp linux-5240/fs/afs/fsclient.h linux-5250/fs/afs/fsclient.h
3974 --- linux-5240/fs/afs/fsclient.h 1970-01-01 01:00:00.000000000 +0100
3975 +++ linux-5250/fs/afs/fsclient.h
3977 +/* fsclient.h: AFS File Server client stub declarations
3979 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
3980 + * Written by David Howells (dhowells@redhat.com)
3982 + * This program is free software; you can redistribute it and/or
3983 + * modify it under the terms of the GNU General Public License
3984 + * as published by the Free Software Foundation; either version
3985 + * 2 of the License, or (at your option) any later version.
3988 +#ifndef _LINUX_AFS_FSCLIENT_H
3989 +#define _LINUX_AFS_FSCLIENT_H
3991 +#include "server.h"
3993 +extern int afs_rxfs_get_volume_info(afs_server_t *server,
3995 + afs_volume_info_t *vinfo);
3997 +extern int afs_rxfs_fetch_file_status(afs_server_t *server,
3998 + afs_vnode_t *vnode,
3999 + afs_volsync_t *volsync);
4001 +struct afs_rxfs_fetch_descriptor {
4002 + afs_fid_t fid; /* file ID to fetch */
4003 + size_t size; /* total number of bytes to fetch */
4004 + off_t offset; /* offset in file to start from */
4005 + void *buffer; /* read buffer */
4006 + size_t actual; /* actual size sent back by server */
4009 +extern int afs_rxfs_fetch_file_data(afs_server_t *server,
4010 + afs_vnode_t *vnode,
4011 + struct afs_rxfs_fetch_descriptor *desc,
4012 + afs_volsync_t *volsync);
4014 +extern int afs_rxfs_give_up_callback(afs_server_t *server, afs_vnode_t *vnode);
4016 +/* this doesn't appear to work in OpenAFS server */
4017 +extern int afs_rxfs_lookup(afs_server_t *server,
4019 + const char *filename,
4020 + afs_vnode_t *vnode,
4021 + afs_volsync_t *volsync);
4023 +/* this is apparently mis-implemented in OpenAFS server */
4024 +extern int afs_rxfs_get_root_volume(afs_server_t *server,
4029 +#endif /* _LINUX_AFS_FSCLIENT_H */
4030 diff -urNp linux-5240/fs/afs/inode.c linux-5250/fs/afs/inode.c
4031 --- linux-5240/fs/afs/inode.c 1970-01-01 01:00:00.000000000 +0100
4032 +++ linux-5250/fs/afs/inode.c
4035 + * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
4037 + * This software may be freely redistributed under the terms of the
4038 + * GNU General Public License.
4040 + * You should have received a copy of the GNU General Public License
4041 + * along with this program; if not, write to the Free Software
4042 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
4044 + * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
4045 + * David Howells <dhowells@redhat.com>
4049 +#include <linux/kernel.h>
4050 +#include <linux/module.h>
4051 +#include <linux/init.h>
4052 +#include <linux/sched.h>
4053 +#include <linux/slab.h>
4054 +#include <linux/fs.h>
4055 +#include <linux/pagemap.h>
4056 +#include "volume.h"
4060 +#include "internal.h"
4062 +struct afs_iget_data {
4064 + afs_volume_t *volume; /* volume on which resides */
4065 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
4066 + afs_vnode_t *new_vnode; /* new vnode record */
4070 +/*****************************************************************************/
4072 + * map the AFS file status to the inode member variables
4074 +static int afs_inode_map_status(afs_vnode_t *vnode)
4076 + struct inode *inode = AFS_VNODE_TO_I(vnode);
4078 + _debug("FS: ft=%d lk=%d sz=%u ver=%Lu mod=%hu",
4079 + vnode->status.type,
4080 + vnode->status.nlink,
4081 + vnode->status.size,
4082 + vnode->status.version,
4083 + vnode->status.mode);
4085 + switch (vnode->status.type) {
4086 + case AFS_FTYPE_FILE:
4087 + inode->i_mode = S_IFREG | vnode->status.mode;
4088 + inode->i_op = &afs_file_inode_operations;
4089 + inode->i_fop = &afs_file_file_operations;
4091 + case AFS_FTYPE_DIR:
4092 + inode->i_mode = S_IFDIR | vnode->status.mode;
4093 + inode->i_op = &afs_dir_inode_operations;
4094 + inode->i_fop = &afs_dir_file_operations;
4096 + case AFS_FTYPE_SYMLINK:
4097 + inode->i_mode = S_IFLNK | vnode->status.mode;
4098 + inode->i_op = &page_symlink_inode_operations;
4101 + printk("kAFS: AFS vnode with undefined type\n");
4105 + inode->i_nlink = vnode->status.nlink;
4106 + inode->i_uid = vnode->status.owner;
4108 + inode->i_rdev = NODEV;
4109 + inode->i_size = vnode->status.size;
4110 + inode->i_atime = inode->i_mtime = inode->i_ctime = vnode->status.mtime_server;
4111 + inode->i_blksize = PAGE_CACHE_SIZE;
4112 + inode->i_blocks = 0;
4113 + inode->i_version = vnode->fid.unique;
4114 + inode->i_mapping->a_ops = &afs_fs_aops;
4116 + /* check to see whether a symbolic link is really a mountpoint */
4117 + if (vnode->status.type==AFS_FTYPE_SYMLINK) {
4118 + afs_mntpt_check_symlink(vnode);
4120 + if (vnode->flags & AFS_VNODE_MOUNTPOINT) {
4121 + inode->i_mode = S_IFDIR | vnode->status.mode;
4122 + inode->i_op = &afs_mntpt_inode_operations;
4123 + inode->i_fop = &afs_mntpt_file_operations;
4128 +} /* end afs_inode_map_status() */
4130 +/*****************************************************************************/
4132 + * attempt to fetch the status of an inode, coalescing multiple simultaneous fetches
4134 +int afs_inode_fetch_status(struct inode *inode)
4136 + afs_vnode_t *vnode;
4139 + vnode = AFS_FS_I(inode);
4141 + ret = afs_vnode_fetch_status(vnode);
4144 + ret = afs_inode_map_status(vnode);
4148 +} /* end afs_inode_fetch_status() */
4150 +/*****************************************************************************/
4152 + * iget5() comparator
4154 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
4155 +static int afs_iget5_test(struct inode *inode, void *opaque)
4157 + struct afs_iget_data *data = opaque;
4159 + /* only match inodes with the same version number */
4160 + return inode->i_ino==data->fid.vnode && inode->i_version==data->fid.unique;
4161 +} /* end afs_iget5_test() */
4164 +/*****************************************************************************/
4166 + * iget5() inode initialiser
4168 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
4169 +static int afs_iget5_set(struct inode *inode, void *opaque)
4171 + struct afs_iget_data *data = opaque;
4172 + afs_vnode_t *vnode = AFS_FS_I(inode);
4174 + inode->i_ino = data->fid.vnode;
4175 + inode->i_version = data->fid.unique;
4176 + vnode->fid = data->fid;
4177 + vnode->volume = data->volume;
4180 +} /* end afs_iget5_set() */
4183 +/*****************************************************************************/
4185 + * iget4() comparator
4187 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
4188 +static int afs_iget4_test(struct inode *inode, ino_t ino, void *opaque)
4190 + struct afs_iget_data *data = opaque;
4192 + /* only match inodes with the same version number */
4193 + return inode->i_ino==data->fid.vnode && inode->i_version==data->fid.unique;
4194 +} /* end afs_iget4_test() */
4197 +/*****************************************************************************/
4199 + * read an inode (2.4 only)
4201 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
4202 +void afs_read_inode2(struct inode *inode, void *opaque)
4204 + struct afs_iget_data *data = opaque;
4205 + afs_vnode_t *vnode;
4208 + kenter(",{{%u,%u,%u},%p}",data->fid.vid,data->fid.vnode,data->fid.unique,data->volume);
4210 + if (inode->u.generic_ip) BUG();
4212 + /* attach a pre-allocated vnode record */
4213 + inode->u.generic_ip = vnode = data->new_vnode;
4214 + data->new_vnode = NULL;
4216 + memset(vnode,0,sizeof(*vnode));
4217 + vnode->inode = inode;
4218 + init_waitqueue_head(&vnode->update_waitq);
4219 + spin_lock_init(&vnode->lock);
4220 + INIT_LIST_HEAD(&vnode->cb_link);
4221 + INIT_LIST_HEAD(&vnode->cb_hash_link);
4222 + afs_timer_init(&vnode->cb_timeout,&afs_vnode_cb_timed_out_ops);
4223 + vnode->flags |= AFS_VNODE_CHANGED;
4224 + vnode->volume = data->volume;
4225 + vnode->fid = data->fid;
4227 + /* ask the server for a status check */
4228 + ret = afs_vnode_fetch_status(vnode);
4230 + make_bad_inode(inode);
4231 + kleave(" [bad inode]");
4235 + ret = afs_inode_map_status(vnode);
4237 + make_bad_inode(inode);
4238 + kleave(" [bad inode]");
4244 +} /* end afs_read_inode2() */
4247 +/*****************************************************************************/
4251 +inline int afs_iget(struct super_block *sb, afs_fid_t *fid, struct inode **_inode)
4253 + struct afs_iget_data data = { fid: *fid };
4254 + struct afs_super_info *as;
4255 + struct inode *inode;
4256 + afs_vnode_t *vnode;
4259 + kenter(",{%u,%u,%u},,",fid->vid,fid->vnode,fid->unique);
4261 + as = sb->u.generic_sbp;
4262 + data.volume = as->volume;
4264 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
4265 + inode = iget5_locked(sb,fid->vnode,afs_iget5_test,afs_iget5_set,&data);
4267 + _leave(" = -ENOMEM");
4271 + vnode = AFS_FS_I(inode);
4273 + /* deal with an existing inode */
4274 + if (!(inode->i_state & I_NEW)) {
4275 + ret = afs_vnode_fetch_status(vnode);
4280 + _leave(" = %d",ret);
4284 + /* okay... it's a new inode */
4285 + vnode->flags |= AFS_VNODE_CHANGED;
4286 + ret = afs_inode_fetch_status(inode);
4291 + /* find a cache entry for it */
4292 + ret = afs_cache_lookup_vnode(as->volume,vnode);
4298 + unlock_new_inode(inode);
4301 + _leave(" = 0 [CB { v=%u x=%lu t=%u nix=%u }]",
4302 + vnode->cb_version,
4303 + vnode->cb_timeout.timo_jif,
4311 + make_bad_inode(inode);
4312 + unlock_new_inode(inode);
4315 + _leave(" = %d [bad]",ret);
4320 + /* pre-allocate a vnode record so that afs_read_inode2() doesn't have to return an inode
4321 + * without one attached
4323 + data.new_vnode = kmalloc(sizeof(afs_vnode_t),GFP_KERNEL);
4324 + if (!data.new_vnode) {
4325 + kleave(" = -ENOMEM");
4329 + inode = iget4(sb,fid->vnode,afs_iget4_test,&data);
4330 + if (data.new_vnode) kfree(data.new_vnode);
4332 + kleave(" = -ENOMEM");
4336 + vnode = AFS_FS_I(inode);
4338 + kleave(" = 0 [CB { v=%u x=%lu t=%u nix=%u }]",
4339 + vnode->cb_version,
4340 + vnode->cb_timeout.timo_jif,
4346 +} /* end afs_iget() */
4348 +/*****************************************************************************/
4350 + * read the attributes of an inode
4352 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
4353 +int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
4355 + struct inode *inode;
4356 + afs_vnode_t *vnode;
4359 + inode = dentry->d_inode;
4361 + _enter("{ ino=%lu v=%lu }",inode->i_ino,inode->i_version);
4363 + vnode = AFS_FS_I(inode);
4365 + ret = afs_inode_fetch_status(inode);
4366 + if (ret==-ENOENT) {
4367 + _leave(" = %d [%d %p]",ret,atomic_read(&dentry->d_count),dentry->d_inode);
4371 + make_bad_inode(inode);
4372 + _leave(" = %d",ret);
4376 + /* transfer attributes from the inode structure to the stat structure */
4377 + generic_fillattr(inode,stat);
4379 + _leave(" = 0 CB { v=%u x=%u t=%u }",
4380 + vnode->callback.version,
4381 + vnode->callback.expiry,
4382 + vnode->callback.type);
4385 +} /* end afs_inode_getattr() */
4388 +/*****************************************************************************/
4390 + * revalidate the inode
4392 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
4393 +int afs_inode_revalidate(struct dentry *dentry)
4395 + struct inode *inode;
4396 + afs_vnode_t *vnode;
4399 + inode = dentry->d_inode;
4401 + _enter("{ ino=%lu v=%lu }",inode->i_ino,inode->i_version);
4403 + vnode = AFS_FS_I(inode);
4405 + ret = afs_inode_fetch_status(inode);
4406 + if (ret==-ENOENT) {
4407 + _leave(" = %d [%d %p]",ret,atomic_read(&dentry->d_count),dentry->d_inode);
4411 + make_bad_inode(inode);
4412 + _leave(" = %d",ret);
4416 + _leave(" = 0 CB { v=%u x=%u t=%u }",
4417 + vnode->cb_version,
4422 +} /* end afs_inode_revalidate() */
4425 +/*****************************************************************************/
4427 + * clear an AFS inode
4429 +void afs_clear_inode(struct inode *inode)
4431 + afs_vnode_t *vnode;
4433 + vnode = AFS_FS_I(inode);
4435 + _enter("(ino=%lu { v=%u x=%u t=%u })",
4437 + vnode->cb_version,
4442 + afs_vnode_give_up_callback(vnode);
4445 +} /* end afs_clear_inode() */
4446 diff -urNp linux-5240/fs/afs/internal.h linux-5250/fs/afs/internal.h
4447 --- linux-5240/fs/afs/internal.h 1970-01-01 01:00:00.000000000 +0100
4448 +++ linux-5250/fs/afs/internal.h
4450 +/* internal.h: internal AFS stuff
4452 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4453 + * Written by David Howells (dhowells@redhat.com)
4455 + * This program is free software; you can redistribute it and/or
4456 + * modify it under the terms of the GNU General Public License
4457 + * as published by the Free Software Foundation; either version
4458 + * 2 of the License, or (at your option) any later version.
4461 +#ifndef AFS_INTERNAL_H
4462 +#define AFS_INTERNAL_H
4464 +#include <linux/version.h>
4465 +#include <linux/compiler.h>
4466 +#include <linux/kernel.h>
4467 +#include <linux/fs.h>
4472 +#define kenter(FMT,...) printk("==> %s("FMT")\n",__FUNCTION__,##__VA_ARGS__)
4473 +#define kleave(FMT,...) printk("<== %s()"FMT"\n",__FUNCTION__,##__VA_ARGS__)
4474 +#define kdebug(FMT,...) printk(FMT"\n",##__VA_ARGS__)
4475 +#define kproto(FMT,...) printk("### "FMT"\n",##__VA_ARGS__)
4476 +#define knet(FMT,...) printk(FMT"\n",##__VA_ARGS__)
4479 +#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
4480 +#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
4481 +#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
4482 +#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__)
4483 +#define _net(FMT,...) knet(FMT,##__VA_ARGS__)
4485 +#define _enter(FMT,...) do { } while(0)
4486 +#define _leave(FMT,...) do { } while(0)
4487 +#define _debug(FMT,...) do { } while(0)
4488 +#define _proto(FMT,...) do { } while(0)
4489 +#define _net(FMT,...) do { } while(0)
4492 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
4493 +#define wait_on_page_locked wait_on_page
4494 +#define PageUptodate Page_Uptodate
4496 +static inline struct proc_dir_entry *PDE(const struct inode *inode)
4498 + return (struct proc_dir_entry *)inode->u.generic_ip;
4505 +extern struct rw_semaphore afs_proc_cells_sem;
4506 +extern struct list_head afs_proc_cells;
4511 +extern struct inode_operations afs_dir_inode_operations;
4512 +extern struct file_operations afs_dir_file_operations;
4517 +extern struct address_space_operations afs_fs_aops;
4518 +extern struct inode_operations afs_file_inode_operations;
4519 +extern struct file_operations afs_file_file_operations;
4524 +extern int afs_iget(struct super_block *sb, afs_fid_t *fid, struct inode **_inode);
4525 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
4526 +extern int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
4528 +extern void afs_read_inode2(struct inode *inode, void *opaque);
4529 +extern int afs_inode_revalidate(struct dentry *dentry);
4531 +extern void afs_clear_inode(struct inode *inode);
4536 +extern struct inode_operations afs_mntpt_inode_operations;
4537 +extern struct file_operations afs_mntpt_file_operations;
4539 +extern int afs_mntpt_check_symlink(afs_vnode_t *vnode);
4544 +extern int afs_fs_init(void);
4545 +extern void afs_fs_exit(void);
4547 +#define AFS_CB_HASH_COUNT (PAGE_SIZE/sizeof(struct list_head))
4549 +extern struct list_head afs_cb_hash_tbl[];
4550 +extern spinlock_t afs_cb_hash_lock;
4552 +#define afs_cb_hash(SRV,FID) \
4553 + afs_cb_hash_tbl[((unsigned)(SRV) + (FID)->vid + (FID)->vnode + (FID)->unique) % \
4554 + AFS_CB_HASH_COUNT]
4559 +extern int afs_proc_init(void);
4560 +extern void afs_proc_cleanup(void);
4561 +extern int afs_proc_cell_setup(afs_cell_t *cell);
4562 +extern void afs_proc_cell_remove(afs_cell_t *cell);
4564 +#endif /* AFS_INTERNAL_H */
4565 diff -urNp linux-5240/fs/afs/kafsasyncd.c linux-5250/fs/afs/kafsasyncd.c
4566 --- linux-5240/fs/afs/kafsasyncd.c 1970-01-01 01:00:00.000000000 +0100
4567 +++ linux-5250/fs/afs/kafsasyncd.c
4569 +/* kafsasyncd.c: AFS asynchronous operation daemon
4571 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4572 + * Written by David Howells (dhowells@redhat.com)
4574 + * This program is free software; you can redistribute it and/or
4575 + * modify it under the terms of the GNU General Public License
4576 + * as published by the Free Software Foundation; either version
4577 + * 2 of the License, or (at your option) any later version.
4580 + * The AFS async daemon is used to do the following:
4581 + * - probe "dead" servers to see whether they've come back to life yet.
4582 + * - probe "live" servers that we haven't talked to for a while to see if they are better
4583 + * candidates for serving than what we're currently using
4584 + * - poll volume location servers to keep up to date volume location lists
4587 +#include <linux/version.h>
4588 +#include <linux/module.h>
4589 +#include <linux/init.h>
4590 +#include <linux/sched.h>
4591 +#include <linux/completion.h>
4593 +#include "server.h"
4594 +#include "volume.h"
4595 +#include "kafsasyncd.h"
4596 +#include "kafstimod.h"
4597 +#include <rxrpc/call.h>
4598 +#include <asm/errno.h>
4599 +#include "internal.h"
4601 +static DECLARE_COMPLETION(kafsasyncd_alive);
4602 +static DECLARE_COMPLETION(kafsasyncd_dead);
4603 +static DECLARE_WAIT_QUEUE_HEAD(kafsasyncd_sleepq);
4604 +static struct task_struct *kafsasyncd_task;
4605 +static int kafsasyncd_die;
4607 +static int kafsasyncd(void *arg);
4609 +static LIST_HEAD(kafsasyncd_async_attnq);
4610 +static LIST_HEAD(kafsasyncd_async_busyq);
4611 +static spinlock_t kafsasyncd_async_lock = SPIN_LOCK_UNLOCKED;
4613 +static void kafsasyncd_null_call_attn_func(struct rxrpc_call *call)
4617 +static void kafsasyncd_null_call_error_func(struct rxrpc_call *call)
4621 +/*****************************************************************************/
4623 + * start the async daemon
4625 +int afs_kafsasyncd_start(void)
4629 + ret = kernel_thread(kafsasyncd,NULL,0);
4633 + wait_for_completion(&kafsasyncd_alive);
4636 +} /* end afs_kafsasyncd_start() */
4638 +/*****************************************************************************/
4640 + * stop the async daemon
4642 +void afs_kafsasyncd_stop(void)
4644 + /* get rid of my daemon */
4645 + kafsasyncd_die = 1;
4646 + wake_up(&kafsasyncd_sleepq);
4647 + wait_for_completion(&kafsasyncd_dead);
4649 +} /* end afs_kafsasyncd_stop() */
4651 +/*****************************************************************************/
4655 +static int kafsasyncd(void *arg)
4657 + DECLARE_WAITQUEUE(myself,current);
4659 + struct list_head *_p;
4662 + kafsasyncd_task = current;
4664 + printk("kAFS: Started kafsasyncd %d\n",current->pid);
4665 + strcpy(current->comm,"kafsasyncd");
4669 + complete(&kafsasyncd_alive);
4671 + /* only certain signals are of interest */
4672 + spin_lock_irq(¤t->sigmask_lock);
4673 + siginitsetinv(¤t->blocked,0);
4674 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
4675 + recalc_sigpending();
4677 + recalc_sigpending(current);
4679 + spin_unlock_irq(¤t->sigmask_lock);
4681 + /* loop around looking for things to attend to */
4683 + set_current_state(TASK_INTERRUPTIBLE);
4684 + add_wait_queue(&kafsasyncd_sleepq,&myself);
4687 + if (!list_empty(&kafsasyncd_async_attnq) ||
4688 + signal_pending(current) ||
4693 + set_current_state(TASK_INTERRUPTIBLE);
4696 + remove_wait_queue(&kafsasyncd_sleepq,&myself);
4697 + set_current_state(TASK_RUNNING);
4699 + /* discard pending signals */
4700 + while (signal_pending(current)) {
4703 + spin_lock_irq(¤t->sigmask_lock);
4704 + dequeue_signal(¤t->blocked,&sinfo);
4705 + spin_unlock_irq(¤t->sigmask_lock);
4708 + die = kafsasyncd_die;
4710 + /* deal with the next asynchronous operation requiring attention */
4711 + if (!list_empty(&kafsasyncd_async_attnq)) {
4712 + struct afs_async_op *op;
4714 + _debug("@@@ Begin Asynchronous Operation");
4717 + spin_lock(&kafsasyncd_async_lock);
4719 + if (!list_empty(&kafsasyncd_async_attnq)) {
4720 + op = list_entry(kafsasyncd_async_attnq.next,afs_async_op_t,link);
4721 + list_del(&op->link);
4722 + list_add_tail(&op->link,&kafsasyncd_async_busyq);
4725 + spin_unlock(&kafsasyncd_async_lock);
4727 + _debug("@@@ Operation %p {%p}\n",op,op?op->ops:NULL);
4730 + op->ops->attend(op);
4732 + _debug("@@@ End Asynchronous Operation");
4737 + /* need to kill all outstanding asynchronous operations before exiting */
4738 + kafsasyncd_task = NULL;
4739 + spin_lock(&kafsasyncd_async_lock);
4741 + /* fold the busy and attention queues together */
4742 + list_splice(&kafsasyncd_async_busyq,&kafsasyncd_async_attnq);
4743 + list_del_init(&kafsasyncd_async_busyq);
4745 + /* dequeue kafsasyncd from all their wait queues */
4746 + list_for_each(_p,&kafsasyncd_async_attnq) {
4747 + afs_async_op_t *op = list_entry(_p,afs_async_op_t,link);
4749 + op->call->app_attn_func = kafsasyncd_null_call_attn_func;
4750 + op->call->app_error_func = kafsasyncd_null_call_error_func;
4751 + remove_wait_queue(&op->call->waitq,&op->waiter);
4754 + spin_unlock(&kafsasyncd_async_lock);
4756 + /* abort all the operations */
4757 + while (!list_empty(&kafsasyncd_async_attnq)) {
4758 + afs_async_op_t *op = list_entry(_p,afs_async_op_t,link);
4759 + list_del_init(&op->link);
4761 + rxrpc_call_abort(op->call,-EIO);
4762 + rxrpc_put_call(op->call);
4765 + op->ops->discard(op);
4768 + /* and that's all */
4770 + complete_and_exit(&kafsasyncd_dead,0);
4772 +} /* end kafsasyncd() */
4774 +/*****************************************************************************/
4776 + * begin an operation
4777 + * - place operation on busy queue
4779 +void afs_kafsasyncd_begin_op(afs_async_op_t *op)
4783 + spin_lock(&kafsasyncd_async_lock);
4785 + init_waitqueue_entry(&op->waiter,kafsasyncd_task);
4787 + list_del(&op->link);
4788 + list_add_tail(&op->link,&kafsasyncd_async_busyq);
4790 + spin_unlock(&kafsasyncd_async_lock);
4793 +} /* end afs_kafsasyncd_begin_op() */
4795 +/*****************************************************************************/
4797 + * request attention for an operation
4798 + * - move to attention queue
4800 +void afs_kafsasyncd_attend_op(afs_async_op_t *op)
4804 + spin_lock(&kafsasyncd_async_lock);
4806 + list_del(&op->link);
4807 + list_add_tail(&op->link,&kafsasyncd_async_attnq);
4809 + spin_unlock(&kafsasyncd_async_lock);
4811 + wake_up(&kafsasyncd_sleepq);
4814 +} /* end afs_kafsasyncd_attend_op() */
4816 +/*****************************************************************************/
4818 + * terminate an operation
4819 + * - remove from either queue
4821 +void afs_kafsasyncd_terminate_op(afs_async_op_t *op)
4825 + spin_lock(&kafsasyncd_async_lock);
4827 + list_del_init(&op->link);
4829 + spin_unlock(&kafsasyncd_async_lock);
4831 + wake_up(&kafsasyncd_sleepq);
4834 +} /* end afs_kafsasyncd_terminate_op() */
4835 diff -urNp linux-5240/fs/afs/kafsasyncd.h linux-5250/fs/afs/kafsasyncd.h
4836 --- linux-5240/fs/afs/kafsasyncd.h 1970-01-01 01:00:00.000000000 +0100
4837 +++ linux-5250/fs/afs/kafsasyncd.h
4839 +/* kafsasyncd.h: AFS asynchronous operation daemon
4841 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4842 + * Written by David Howells (dhowells@redhat.com)
4844 + * This program is free software; you can redistribute it and/or
4845 + * modify it under the terms of the GNU General Public License
4846 + * as published by the Free Software Foundation; either version
4847 + * 2 of the License, or (at your option) any later version.
4850 +#ifndef _LINUX_AFS_KAFSASYNCD_H
4851 +#define _LINUX_AFS_KAFSASYNCD_H
4855 +struct afs_async_op_ops {
4856 + void (*attend)(afs_async_op_t *op);
4857 + void (*discard)(afs_async_op_t *op);
4860 +/*****************************************************************************/
4862 + * asynchronous operation record
4864 +struct afs_async_op
4866 + struct list_head link;
4867 + afs_server_t *server; /* server being contacted */
4868 + struct rxrpc_call *call; /* RxRPC call performing op */
4869 + wait_queue_t waiter; /* wait queue for kafsasyncd */
4870 + const struct afs_async_op_ops *ops; /* operations */
4873 +static inline void afs_async_op_init(afs_async_op_t *op, const struct afs_async_op_ops *ops)
4875 + INIT_LIST_HEAD(&op->link);
4880 +extern int afs_kafsasyncd_start(void);
4881 +extern void afs_kafsasyncd_stop(void);
4883 +extern void afs_kafsasyncd_begin_op(afs_async_op_t *op);
4884 +extern void afs_kafsasyncd_attend_op(afs_async_op_t *op);
4885 +extern void afs_kafsasyncd_terminate_op(afs_async_op_t *op);
4887 +#endif /* _LINUX_AFS_KAFSASYNCD_H */
4888 diff -urNp linux-5240/fs/afs/kafstimod.c linux-5250/fs/afs/kafstimod.c
4889 --- linux-5240/fs/afs/kafstimod.c 1970-01-01 01:00:00.000000000 +0100
4890 +++ linux-5250/fs/afs/kafstimod.c
4892 +/* kafstimod.c: AFS timeout daemon
4894 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4895 + * Written by David Howells (dhowells@redhat.com)
4897 + * This program is free software; you can redistribute it and/or
4898 + * modify it under the terms of the GNU General Public License
4899 + * as published by the Free Software Foundation; either version
4900 + * 2 of the License, or (at your option) any later version.
4903 +#include <linux/version.h>
4904 +#include <linux/module.h>
4905 +#include <linux/init.h>
4906 +#include <linux/sched.h>
4907 +#include <linux/completion.h>
4909 +#include "volume.h"
4910 +#include "kafstimod.h"
4911 +#include <asm/errno.h>
4912 +#include "internal.h"
4914 +static DECLARE_COMPLETION(kafstimod_alive);
4915 +static DECLARE_COMPLETION(kafstimod_dead);
4916 +static DECLARE_WAIT_QUEUE_HEAD(kafstimod_sleepq);
4917 +static int kafstimod_die;
4919 +static LIST_HEAD(kafstimod_list);
4920 +static spinlock_t kafstimod_lock = SPIN_LOCK_UNLOCKED;
4922 +static int kafstimod(void *arg);
4924 +/*****************************************************************************/
4926 + * start the timeout daemon
4928 +int afs_kafstimod_start(void)
4932 + ret = kernel_thread(kafstimod,NULL,0);
4936 + wait_for_completion(&kafstimod_alive);
4939 +} /* end afs_kafstimod_start() */
4941 +/*****************************************************************************/
4943 + * stop the timeout daemon
4945 +void afs_kafstimod_stop(void)
4947 + /* get rid of my daemon */
4948 + kafstimod_die = 1;
4949 + wake_up(&kafstimod_sleepq);
4950 + wait_for_completion(&kafstimod_dead);
4952 +} /* end afs_kafstimod_stop() */
4954 +/*****************************************************************************/
4956 + * timeout processing daemon
4958 +static int kafstimod(void *arg)
4960 + DECLARE_WAITQUEUE(myself,current);
4962 + afs_timer_t *timer;
4964 + printk("kAFS: Started kafstimod %d\n",current->pid);
4965 + strcpy(current->comm,"kafstimod");
4969 + complete(&kafstimod_alive);
4971 + /* only certain signals are of interest */
4972 + spin_lock_irq(¤t->sigmask_lock);
4973 + siginitsetinv(¤t->blocked,0);
4974 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
4975 + recalc_sigpending();
4977 + recalc_sigpending(current);
4979 + spin_unlock_irq(¤t->sigmask_lock);
4981 + /* loop around looking for things to attend to */
4983 + set_current_state(TASK_INTERRUPTIBLE);
4984 + add_wait_queue(&kafstimod_sleepq,&myself);
4987 + unsigned long jif;
4988 + signed long timeout;
4990 + /* deal with the server being asked to die */
4991 + if (kafstimod_die) {
4992 + remove_wait_queue(&kafstimod_sleepq,&myself);
4994 + complete_and_exit(&kafstimod_dead,0);
4997 + /* discard pending signals */
4998 + while (signal_pending(current)) {
5001 + spin_lock_irq(¤t->sigmask_lock);
5002 + dequeue_signal(¤t->blocked,&sinfo);
5003 + spin_unlock_irq(¤t->sigmask_lock);
5006 + /* work out the time to elapse before the next event */
5007 + spin_lock(&kafstimod_lock);
5008 + if (list_empty(&kafstimod_list)) {
5009 + timeout = MAX_SCHEDULE_TIMEOUT;
5012 + timer = list_entry(kafstimod_list.next,afs_timer_t,link);
5013 + timeout = timer->timo_jif;
5016 + if (time_before_eq(timeout,jif))
5020 + timeout = (long)timeout - (long)jiffies;
5023 + spin_unlock(&kafstimod_lock);
5025 + schedule_timeout(timeout);
5027 + set_current_state(TASK_INTERRUPTIBLE);
5030 + /* the thing on the front of the queue needs processing
5031 + * - we come here with the lock held and timer pointing to the expired entry
5034 + remove_wait_queue(&kafstimod_sleepq,&myself);
5035 + set_current_state(TASK_RUNNING);
5037 + _debug("@@@ Begin Timeout of %p",timer);
5039 + /* dequeue the timer */
5040 + list_del_init(&timer->link);
5041 + spin_unlock(&kafstimod_lock);
5043 + /* call the timeout function */
5044 + timer->ops->timed_out(timer);
5046 + _debug("@@@ End Timeout");
5049 +} /* end kafstimod() */
5051 +/*****************************************************************************/
5053 + * (re-)queue a timer
5055 +void afs_kafstimod_add_timer(afs_timer_t *timer, unsigned long timeout)
5057 + struct list_head *_p;
5058 + afs_timer_t *ptimer;
5060 + _enter("%p,%lu",timer,timeout);
5062 + spin_lock(&kafstimod_lock);
5064 + list_del(&timer->link);
5066 + /* the timer was deferred or reset - put it back in the queue at the right place */
5067 + timer->timo_jif = jiffies + timeout;
5069 + list_for_each(_p,&kafstimod_list) {
5070 + ptimer = list_entry(_p,afs_timer_t,link);
5071 + if (time_before(timer->timo_jif,ptimer->timo_jif))
5075 + list_add_tail(&timer->link,_p); /* insert before stopping point */
5077 + spin_unlock(&kafstimod_lock);
5079 + wake_up(&kafstimod_sleepq);
5082 +} /* end afs_kafstimod_add_timer() */
5084 +/*****************************************************************************/
5087 + * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued
5089 +int afs_kafstimod_del_timer(afs_timer_t *timer)
5093 + _enter("%p",timer);
5095 + spin_lock(&kafstimod_lock);
5097 + if (list_empty(&timer->link))
5100 + list_del_init(&timer->link);
5102 + spin_unlock(&kafstimod_lock);
5104 + wake_up(&kafstimod_sleepq);
5106 + _leave(" = %d",ret);
5108 +} /* end afs_kafstimod_del_timer() */
5109 diff -urNp linux-5240/fs/afs/kafstimod.h linux-5250/fs/afs/kafstimod.h
5110 --- linux-5240/fs/afs/kafstimod.h 1970-01-01 01:00:00.000000000 +0100
5111 +++ linux-5250/fs/afs/kafstimod.h
5113 +/* kafstimod.h: AFS timeout daemon
5115 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5116 + * Written by David Howells (dhowells@redhat.com)
5118 + * This program is free software; you can redistribute it and/or
5119 + * modify it under the terms of the GNU General Public License
5120 + * as published by the Free Software Foundation; either version
5121 + * 2 of the License, or (at your option) any later version.
5124 +#ifndef _LINUX_AFS_KAFSTIMOD_H
5125 +#define _LINUX_AFS_KAFSTIMOD_H
5129 +struct afs_timer_ops {
5130 + /* called when the front of the timer queue has timed out */
5131 + void (*timed_out)(struct afs_timer *timer);
5134 +/*****************************************************************************/
5136 + * AFS timer/timeout record
5140 + struct list_head link; /* link in timer queue */
5141 + unsigned long timo_jif; /* timeout time */
5142 + const struct afs_timer_ops *ops; /* timeout expiry function */
5145 +static inline void afs_timer_init(afs_timer_t *timer, const struct afs_timer_ops *ops)
5147 + INIT_LIST_HEAD(&timer->link);
5151 +extern int afs_kafstimod_start(void);
5152 +extern void afs_kafstimod_stop(void);
5154 +extern void afs_kafstimod_add_timer(afs_timer_t *timer, unsigned long timeout);
5155 +extern int afs_kafstimod_del_timer(afs_timer_t *timer);
5157 +#endif /* _LINUX_AFS_KAFSTIMOD_H */
5158 diff -urNp linux-5240/fs/afs/main.c linux-5250/fs/afs/main.c
5159 --- linux-5240/fs/afs/main.c 1970-01-01 01:00:00.000000000 +0100
5160 +++ linux-5250/fs/afs/main.c
5162 +/* main.c: AFS client file system
5164 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5165 + * Written by David Howells (dhowells@redhat.com)
5167 + * This program is free software; you can redistribute it and/or
5168 + * modify it under the terms of the GNU General Public License
5169 + * as published by the Free Software Foundation; either version
5170 + * 2 of the License, or (at your option) any later version.
5173 +#include <linux/module.h>
5174 +#include <linux/init.h>
5175 +#include <linux/sched.h>
5176 +#include <linux/completion.h>
5177 +#include <rxrpc/rxrpc.h>
5178 +#include <rxrpc/transport.h>
5179 +#include <rxrpc/call.h>
5180 +#include <rxrpc/peer.h>
5182 +#include "server.h"
5183 +#include "fsclient.h"
5184 +#include "cmservice.h"
5185 +#include "kafstimod.h"
5186 +#include "kafsasyncd.h"
5187 +#include "internal.h"
5189 +struct rxrpc_transport *afs_transport;
5191 +static int afs_init(void);
5192 +static void afs_exit(void);
5193 +static int afs_adding_peer(struct rxrpc_peer *peer);
5194 +static void afs_discarding_peer(struct rxrpc_peer *peer);
5196 +module_init(afs_init);
5197 +module_exit(afs_exit);
5199 +MODULE_DESCRIPTION("AFS Client File System");
5200 +MODULE_AUTHOR("Red Hat, Inc.");
5201 +MODULE_LICENSE("GPL");
5203 +static struct rxrpc_peer_ops afs_peer_ops = {
5204 + adding: afs_adding_peer,
5205 + discarding: afs_discarding_peer,
5208 +struct list_head afs_cb_hash_tbl[AFS_CB_HASH_COUNT];
5209 +spinlock_t afs_cb_hash_lock = SPIN_LOCK_UNLOCKED;
5211 +/*****************************************************************************/
5213 + * initialise the AFS client FS module
5215 +static int afs_init(void)
5219 + printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");
5221 + /* initialise the callback hash table */
5222 + spin_lock_init(&afs_cb_hash_lock);
5223 + for (loop=AFS_CB_HASH_COUNT-1; loop>=0; loop--)
5224 + INIT_LIST_HEAD(&afs_cb_hash_tbl[loop]);
5226 + /* register the /proc stuff */
5227 + ret = afs_proc_init();
5231 + /* initialise the cell DB */
5232 + ret = afs_cell_init();
5236 + /* start the timeout daemon */
5237 + ret = afs_kafstimod_start();
5241 + /* start the async operation daemon */
5242 + ret = afs_kafsasyncd_start();
5244 + goto error_kafstimod;
5246 + /* create the RxRPC transport */
5247 + ret = rxrpc_create_transport(7001,&afs_transport);
5249 + goto error_kafsasyncd;
5251 + afs_transport->peer_ops = &afs_peer_ops;
5253 + /* register the filesystems */
5254 + ret = afs_fs_init();
5256 + goto error_transport;
5261 + rxrpc_put_transport(afs_transport);
5263 + afs_kafsasyncd_stop();
5265 + afs_kafstimod_stop();
5268 + afs_proc_cleanup();
5269 + printk(KERN_ERR "kAFS: failed to register: %d\n",ret);
5271 +} /* end afs_init() */
5273 +/*****************************************************************************/
5275 + * clean up on module removal
5277 +static void afs_exit(void)
5279 + printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n");
5282 + rxrpc_put_transport(afs_transport);
5283 + afs_kafstimod_stop();
5284 + afs_kafsasyncd_stop();
5286 + afs_proc_cleanup();
5288 +} /* end afs_exit() */
5290 +/*****************************************************************************/
5292 + * notification that new peer record is being added
5293 + * - called from krxsecd
5294 + * - return an error to induce an abort
5295 + * - mustn't sleep (caller holds an rwlock)
5297 +static int afs_adding_peer(struct rxrpc_peer *peer)
5299 + afs_server_t *server;
5302 + _debug("kAFS: Adding new peer %08x\n",ntohl(peer->addr.s_addr));
5304 + /* determine which server the peer resides in (if any) */
5305 + ret = afs_server_find_by_peer(peer,&server);
5307 + return ret; /* none that we recognise, so abort */
5309 + _debug("Server %p{u=%d}\n",server,atomic_read(&server->usage));
5311 + _debug("Cell %p{u=%d}\n",server->cell,atomic_read(&server->cell->usage));
5313 + /* cross-point the structs under a global lock */
5314 + spin_lock(&afs_server_peer_lock);
5315 + peer->user = server;
5316 + server->peer = peer;
5317 + spin_unlock(&afs_server_peer_lock);
5319 + afs_put_server(server);
5322 +} /* end afs_adding_peer() */
5324 +/*****************************************************************************/
5326 + * notification that a peer record is being discarded
5327 + * - called from krxiod or krxsecd
5329 +static void afs_discarding_peer(struct rxrpc_peer *peer)
5331 + afs_server_t *server;
5333 + _enter("%p",peer);
5335 + _debug("Discarding peer %08x (rtt=%lu.%lumS)\n",
5336 + ntohl(peer->addr.s_addr),
5340 + /* uncross-point the structs under a global lock */
5341 + spin_lock(&afs_server_peer_lock);
5342 + server = peer->user;
5344 + peer->user = NULL;
5345 + server->peer = NULL;
5347 + //_debug("Server %p{u=%d}\n",server,atomic_read(&server->usage));
5348 + //_debug("Cell %p{u=%d}\n",server->cell,atomic_read(&server->cell->usage));
5350 + spin_unlock(&afs_server_peer_lock);
5354 +} /* end afs_discarding_peer() */
5355 diff -urNp linux-5240/fs/afs/Makefile linux-5250/fs/afs/Makefile
5356 --- linux-5240/fs/afs/Makefile 1970-01-01 01:00:00.000000000 +0100
5357 +++ linux-5250/fs/afs/Makefile
5360 +# Makefile for Red Hat Linux AFS client.
5388 +# superfluous for 2.5, but needed for 2.4..
5389 +kafs.o: $(kafs-objs)
5390 + $(LD) -r -o kafs.o $(kafs-objs)
5392 +include $(TOPDIR)/Rules.make
5393 diff -urNp linux-5240/fs/afs/misc.c linux-5250/fs/afs/misc.c
5394 --- linux-5240/fs/afs/misc.c 1970-01-01 01:00:00.000000000 +0100
5395 +++ linux-5250/fs/afs/misc.c
5397 +/* misc.c: miscellaneous bits
5399 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5400 + * Written by David Howells (dhowells@redhat.com)
5402 + * This program is free software; you can redistribute it and/or
5403 + * modify it under the terms of the GNU General Public License
5404 + * as published by the Free Software Foundation; either version
5405 + * 2 of the License, or (at your option) any later version.
5408 +#include <linux/kernel.h>
5409 +#include <linux/module.h>
5410 +#include <linux/errno.h>
5411 +#include "errors.h"
5412 +#include "internal.h"
5414 +/*****************************************************************************/
5416 + * convert an AFS abort code to a Linux error number
5418 +int afs_abort_to_error(int abortcode)
5420 + switch (abortcode) {
5421 + case VSALVAGE: return -EIO;
5422 + case VNOVNODE: return -ENOENT;
5423 + case VNOVOL: return -ENXIO;
5424 + case VVOLEXISTS: return -EEXIST;
5425 + case VNOSERVICE: return -EIO;
5426 + case VOFFLINE: return -ENOENT;
5427 + case VONLINE: return -EEXIST;
5428 + case VDISKFULL: return -ENOSPC;
5429 + case VOVERQUOTA: return -EDQUOT;
5430 + case VBUSY: return -EBUSY;
5431 + case VMOVED: return -ENXIO;
5432 + default: return -EIO;
5435 +} /* end afs_abort_to_error() */
5436 diff -urNp linux-5240/fs/afs/mntpt.c linux-5250/fs/afs/mntpt.c
5437 --- linux-5240/fs/afs/mntpt.c 1970-01-01 01:00:00.000000000 +0100
5438 +++ linux-5250/fs/afs/mntpt.c
5440 +/* mntpt.c: mountpoint management
5442 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5443 + * Written by David Howells (dhowells@redhat.com)
5445 + * This program is free software; you can redistribute it and/or
5446 + * modify it under the terms of the GNU General Public License
5447 + * as published by the Free Software Foundation; either version
5448 + * 2 of the License, or (at your option) any later version.
5451 +#include <linux/kernel.h>
5452 +#include <linux/module.h>
5453 +#include <linux/init.h>
5454 +#include <linux/sched.h>
5455 +#include <linux/slab.h>
5456 +#include <linux/fs.h>
5457 +#include <linux/pagemap.h>
5458 +#include "volume.h"
5461 +#include "internal.h"
5464 +static struct dentry *afs_mntpt_lookup(struct inode *dir, struct dentry *dentry);
5465 +static int afs_mntpt_open(struct inode *inode, struct file *file);
5466 +//static int afs_mntpt_readlink(struct dentry *dentry, char *buf, int size);
5468 +struct file_operations afs_mntpt_file_operations = {
5469 + open: afs_mntpt_open,
5472 +struct inode_operations afs_mntpt_inode_operations = {
5473 + lookup: afs_mntpt_lookup,
5474 + readlink: page_readlink,
5475 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
5476 + getattr: afs_inode_getattr,
5478 + revalidate: afs_inode_revalidate,
5482 +/*****************************************************************************/
5484 + * check a symbolic link to see whether it actually encodes a mountpoint
5485 + * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
5487 +int afs_mntpt_check_symlink(afs_vnode_t *vnode)
5489 + struct page *page;
5494 + _enter("{%u,%u}",vnode->fid.vnode,vnode->fid.unique);
5496 + /* read the contents of the symlink into the pagecache */
5497 + page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping,0,
5498 + (filler_t*)AFS_VNODE_TO_I(vnode)->i_mapping->a_ops->readpage,NULL);
5499 + if (IS_ERR(page)) {
5500 + ret = PTR_ERR(page);
5505 + wait_on_page_locked(page);
5507 + if (!PageUptodate(page))
5509 + if (PageError(page))
5512 + /* examine the symlink's contents */
5513 + size = vnode->status.size;
5514 + _debug("symlink to %*.*s",size,size,buf);
5517 + (buf[0]=='%' || buf[0]=='#') &&
5520 + _debug("symlink is a mountpoint");
5521 + spin_lock(&vnode->lock);
5522 + vnode->flags |= AFS_VNODE_MOUNTPOINT;
5523 + spin_unlock(&vnode->lock);
5530 + page_cache_release(page);
5532 + _leave(" = %d",ret);
5535 +} /* end afs_mntpt_check_symlink() */
5537 +/*****************************************************************************/
5539 + * no valid lookup procedure on this sort of dir
5541 +static struct dentry *afs_mntpt_lookup(struct inode *dir, struct dentry *dentry)
5543 + return ERR_PTR(-EREMOTE);
5544 +} /* end afs_mntpt_lookup() */
5546 +/*****************************************************************************/
5548 + * no valid open procedure on this sort of dir
5550 +static int afs_mntpt_open(struct inode *inode, struct file *file)
5553 +} /* end afs_mntpt_open() */
5554 diff -urNp linux-5240/fs/afs/mount.h linux-5250/fs/afs/mount.h
5555 --- linux-5240/fs/afs/mount.h 1970-01-01 01:00:00.000000000 +0100
5556 +++ linux-5250/fs/afs/mount.h
5558 +/* mount.h: mount parameters
5560 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5561 + * Written by David Howells (dhowells@redhat.com)
5563 + * This program is free software; you can redistribute it and/or
5564 + * modify it under the terms of the GNU General Public License
5565 + * as published by the Free Software Foundation; either version
5566 + * 2 of the License, or (at your option) any later version.
5569 +#ifndef _LINUX_AFS_MOUNT_H
5570 +#define _LINUX_AFS_MOUNT_H
5572 +struct afs_mountdata {
5573 + const char *volume; /* name of volume */
5574 + const char *cell; /* name of cell containing volume */
5575 + const char *cache; /* name of cache block device */
5576 + size_t nservers; /* number of server addresses listed */
5577 + u_int32_t servers[10]; /* IP addresses of servers in this cell */
5580 +#endif /* _LINUX_AFS_MOUNT_H */
5581 diff -urNp linux-5240/fs/afs/proc.c linux-5250/fs/afs/proc.c
5582 --- linux-5240/fs/afs/proc.c 1970-01-01 01:00:00.000000000 +0100
5583 +++ linux-5250/fs/afs/proc.c
5585 +/* proc.c: /proc interface for AFS
5587 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
5588 + * Written by David Howells (dhowells@redhat.com)
5590 + * This program is free software; you can redistribute it and/or
5591 + * modify it under the terms of the GNU General Public License
5592 + * as published by the Free Software Foundation; either version
5593 + * 2 of the License, or (at your option) any later version.
5596 +#include <linux/sched.h>
5597 +#include <linux/slab.h>
5598 +#include <linux/module.h>
5599 +#include <linux/proc_fs.h>
5600 +#include <linux/seq_file.h>
5602 +#include "volume.h"
5603 +#include <asm/uaccess.h>
5604 +#include "internal.h"
5606 +static struct proc_dir_entry *proc_afs;
5609 +static int afs_proc_cells_open(struct inode *inode, struct file *file);
5610 +static void *afs_proc_cells_start(struct seq_file *p, loff_t *pos);
5611 +static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos);
5612 +static void afs_proc_cells_stop(struct seq_file *p, void *v);
5613 +static int afs_proc_cells_show(struct seq_file *m, void *v);
5614 +static ssize_t afs_proc_cells_write(struct file *file, const char *buf, size_t size, loff_t *_pos);
5616 +static struct seq_operations afs_proc_cells_ops = {
5617 + start: afs_proc_cells_start,
5618 + next: afs_proc_cells_next,
5619 + stop: afs_proc_cells_stop,
5620 + show: afs_proc_cells_show,
5623 +static struct file_operations afs_proc_cells_fops = {
5624 + open: afs_proc_cells_open,
5626 + write: afs_proc_cells_write,
5627 + llseek: seq_lseek,
5628 + release: seq_release,
5631 +static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file);
5632 +static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file);
5633 +static void *afs_proc_cell_volumes_start(struct seq_file *p, loff_t *pos);
5634 +static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, loff_t *pos);
5635 +static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v);
5636 +static int afs_proc_cell_volumes_show(struct seq_file *m, void *v);
5638 +static struct seq_operations afs_proc_cell_volumes_ops = {
5639 + start: afs_proc_cell_volumes_start,
5640 + next: afs_proc_cell_volumes_next,
5641 + stop: afs_proc_cell_volumes_stop,
5642 + show: afs_proc_cell_volumes_show,
5645 +static struct file_operations afs_proc_cell_volumes_fops = {
5646 + open: afs_proc_cell_volumes_open,
5648 + llseek: seq_lseek,
5649 + release: afs_proc_cell_volumes_release,
5652 +static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file);
5653 +static int afs_proc_cell_vlservers_release(struct inode *inode, struct file *file);
5654 +static void *afs_proc_cell_vlservers_start(struct seq_file *p, loff_t *pos);
5655 +static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, loff_t *pos);
5656 +static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v);
5657 +static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v);
5659 +static struct seq_operations afs_proc_cell_vlservers_ops = {
5660 + start: afs_proc_cell_vlservers_start,
5661 + next: afs_proc_cell_vlservers_next,
5662 + stop: afs_proc_cell_vlservers_stop,
5663 + show: afs_proc_cell_vlservers_show,
5666 +static struct file_operations afs_proc_cell_vlservers_fops = {
5667 + open: afs_proc_cell_vlservers_open,
5669 + llseek: seq_lseek,
5670 + release: afs_proc_cell_vlservers_release,
5673 +static int afs_proc_cell_servers_open(struct inode *inode, struct file *file);
5674 +static int afs_proc_cell_servers_release(struct inode *inode, struct file *file);
5675 +static void *afs_proc_cell_servers_start(struct seq_file *p, loff_t *pos);
5676 +static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, loff_t *pos);
5677 +static void afs_proc_cell_servers_stop(struct seq_file *p, void *v);
5678 +static int afs_proc_cell_servers_show(struct seq_file *m, void *v);
5680 +static struct seq_operations afs_proc_cell_servers_ops = {
5681 + start: afs_proc_cell_servers_start,
5682 + next: afs_proc_cell_servers_next,
5683 + stop: afs_proc_cell_servers_stop,
5684 + show: afs_proc_cell_servers_show,
5687 +static struct file_operations afs_proc_cell_servers_fops = {
5688 + open: afs_proc_cell_servers_open,
5690 + llseek: seq_lseek,
5691 + release: afs_proc_cell_servers_release,
5694 +/*****************************************************************************/
5696 + * initialise the /proc/fs/afs/ directory
5698 +int afs_proc_init(void)
5700 + struct proc_dir_entry *p;
5704 + proc_afs = proc_mkdir("fs/afs",NULL);
5707 + proc_afs->owner = THIS_MODULE;
5709 + p = create_proc_entry("cells",0,proc_afs);
5712 + p->proc_fops = &afs_proc_cells_fops;
5713 + p->owner = THIS_MODULE;
5720 + remove_proc_entry("cells",proc_afs);
5723 + remove_proc_entry("fs/afs",NULL);
5725 + _leave(" = -ENOMEM");
5728 +} /* end afs_proc_init() */
5730 +/*****************************************************************************/
5732 + * clean up the /proc/fs/afs/ directory
5734 +void afs_proc_cleanup(void)
5736 + remove_proc_entry("cells",proc_afs);
5738 + remove_proc_entry("fs/afs",NULL);
5740 +} /* end afs_proc_cleanup() */
5742 +/*****************************************************************************/
5744 + * open "/proc/fs/afs/cells" which provides a summary of extant cells
5746 +static int afs_proc_cells_open(struct inode *inode, struct file *file)
5748 + struct seq_file *m;
5751 + ret = seq_open(file,&afs_proc_cells_ops);
5755 + m = file->private_data;
5756 + m->private = PDE(inode)->data;
5759 +} /* end afs_proc_cells_open() */
5761 +/*****************************************************************************/
5763 + * set up the iterator to start reading from the cells list and return the first item
5765 +static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
5767 + struct list_head *_p;
5768 + loff_t pos = *_pos;
5770 + /* lock the list against modification */
5771 + down_read(&afs_proc_cells_sem);
5773 + /* allow for the header line */
5778 + /* find the n'th element in the list */
5779 + list_for_each(_p,&afs_proc_cells)
5783 + return _p!=&afs_proc_cells ? _p : NULL;
5784 +} /* end afs_proc_cells_start() */
5786 +/*****************************************************************************/
5788 + * move to next cell in cells list
5790 +static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos)
5792 + struct list_head *_p;
5797 + _p = v==(void*)1 ? afs_proc_cells.next : _p->next;
5799 + return _p!=&afs_proc_cells ? _p : NULL;
5800 +} /* end afs_proc_cells_next() */
5802 +/*****************************************************************************/
5804 + * clean up after reading from the cells list
5806 +static void afs_proc_cells_stop(struct seq_file *p, void *v)
5808 + up_read(&afs_proc_cells_sem);
5810 +} /* end afs_proc_cells_stop() */
5812 +/*****************************************************************************/
5814 + * display a header line followed by a load of cell lines
5816 +static int afs_proc_cells_show(struct seq_file *m, void *v)
5818 + afs_cell_t *cell = list_entry(v,afs_cell_t,proc_link);
5820 + /* display header on line 1 */
5821 + if (v == (void *)1) {
5822 + seq_puts(m, "USE IX NAME\n");
5826 + /* display one cell per line on subsequent lines */
5827 + seq_printf(m,"%3d %3u %s\n",
5828 + atomic_read(&cell->usage),
5834 +} /* end afs_proc_cells_show() */
5836 +/*****************************************************************************/
5838 + * handle writes to /proc/fs/afs/cells
5839 + * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]*
5841 +static ssize_t afs_proc_cells_write(struct file *file, const char *buf, size_t size, loff_t *_pos)
5843 + char *kbuf, *name, *args;
5846 + /* start by dragging the command into memory */
5847 + if (size<=1 || size>=PAGE_SIZE)
5850 + kbuf = kmalloc(size+1,GFP_KERNEL);
5855 + if (copy_from_user(kbuf,buf,size)!=0)
5859 + /* trim to first NL */
5860 + name = memchr(kbuf,'\n',size);
5861 + if (name) *name = 0;
5863 + /* split into command, name and argslist */
5864 + name = strchr(kbuf,' ');
5865 + if (!name) goto inval;
5866 + do { *name++ = 0; } while(*name==' ');
5867 + if (!*name) goto inval;
5869 + args = strchr(name,' ');
5870 + if (!args) goto inval;
5871 + do { *args++ = 0; } while(*args==' ');
5872 + if (!*args) goto inval;
5874 + /* determine command to perform */
5875 + _debug("cmd=%s name=%s args=%s",kbuf,name,args);
5877 + if (strcmp(kbuf,"add")==0) {
5879 + ret = afs_cell_create(name,args,&cell);
5883 + printk("kAFS: Added new cell '%s'\n",name);
5893 + _leave(" = %d",ret);
5898 + printk("kAFS: Invalid Command on /proc/fs/afs/cells file\n");
5900 +} /* end afs_proc_cells_write() */
5902 +/*****************************************************************************/
5904 + * initialise /proc/fs/afs/<cell>/
5906 +int afs_proc_cell_setup(afs_cell_t *cell)
5908 + struct proc_dir_entry *p;
5910 + _enter("%p{%s}",cell,cell->name);
5912 + cell->proc_dir = proc_mkdir(cell->name,proc_afs);
5913 + if (!cell->proc_dir)
5916 + p = create_proc_entry("servers",0,cell->proc_dir);
5919 + p->proc_fops = &afs_proc_cell_servers_fops;
5920 + p->owner = THIS_MODULE;
5923 + p = create_proc_entry("vlservers",0,cell->proc_dir);
5925 + goto error_servers;
5926 + p->proc_fops = &afs_proc_cell_vlservers_fops;
5927 + p->owner = THIS_MODULE;
5930 + p = create_proc_entry("volumes",0,cell->proc_dir);
5932 + goto error_vlservers;
5933 + p->proc_fops = &afs_proc_cell_volumes_fops;
5934 + p->owner = THIS_MODULE;
5941 + remove_proc_entry("vlservers",cell->proc_dir);
5943 + remove_proc_entry("servers",cell->proc_dir);
5945 + remove_proc_entry(cell->name,proc_afs);
5946 + _leave(" = -ENOMEM");
5948 +} /* end afs_proc_cell_setup() */
5950 +/*****************************************************************************/
5952 + * remove /proc/fs/afs/<cell>/
5954 +void afs_proc_cell_remove(afs_cell_t *cell)
5958 + remove_proc_entry("volumes",cell->proc_dir);
5959 + remove_proc_entry("vlservers",cell->proc_dir);
5960 + remove_proc_entry("servers",cell->proc_dir);
5961 + remove_proc_entry(cell->name,proc_afs);
5964 +} /* end afs_proc_cell_remove() */
5966 +/*****************************************************************************/
5968 + * open "/proc/fs/afs/<cell>/volumes" which provides a summary of extant cells
5970 +static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file)
5972 + struct seq_file *m;
5976 + cell = afs_get_cell_maybe((afs_cell_t**)&PDE(inode)->data);
5980 + ret = seq_open(file,&afs_proc_cell_volumes_ops);
5984 + m = file->private_data;
5985 + m->private = cell;
5988 +} /* end afs_proc_cell_volumes_open() */
5990 +/*****************************************************************************/
5992 + * close the file and release the ref to the cell
5994 +static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file)
5996 + afs_cell_t *cell = PDE(inode)->data;
5999 + ret = seq_release(inode,file);
6001 + afs_put_cell(cell);
6003 +} /* end afs_proc_cell_volumes_release() */
6005 +/*****************************************************************************/
6007 + * set up the iterator to start reading from the cells list and return the first item
6009 +static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
6011 + struct list_head *_p;
6012 + afs_cell_t *cell = m->private;
6013 + loff_t pos = *_pos;
6015 + _enter("cell=%p pos=%Ld",cell,*_pos);
6017 + /* lock the list against modification */
6018 + down_read(&cell->vl_sem);
6020 + /* allow for the header line */
6025 + /* find the n'th element in the list */
6026 + list_for_each(_p,&cell->vl_list)
6030 + return _p!=&cell->vl_list ? _p : NULL;
6031 +} /* end afs_proc_cell_volumes_start() */
6033 +/*****************************************************************************/
6035 + * move to next cell in cells list
6037 +static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, loff_t *_pos)
6039 + struct list_head *_p;
6040 + afs_cell_t *cell = p->private;
6042 + _enter("cell=%p pos=%Ld",cell,*_pos);
6047 + _p = v==(void*)1 ? cell->vl_list.next : _p->next;
6049 + return _p!=&cell->vl_list ? _p : NULL;
6050 +} /* end afs_proc_cell_volumes_next() */
6052 +/*****************************************************************************/
6054 + * clean up after reading from the cells list
6056 +static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v)
6058 + afs_cell_t *cell = p->private;
6060 + up_read(&cell->vl_sem);
6062 +} /* end afs_proc_cell_volumes_stop() */
6064 +/*****************************************************************************/
6066 + * display a header line followed by a load of volume lines
6068 +static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
6070 + afs_vlocation_t *vlocation = list_entry(v,afs_vlocation_t,link);
6072 + /* display header on line 1 */
6073 + if (v == (void *)1) {
6074 + seq_puts(m, "USE IX VLID[0] VLID[1] VLID[2] NAME\n");
6078 + /* display one cell per line on subsequent lines */
6079 + seq_printf(m,"%3d %3hu %08x %08x %08x %s\n",
6080 + atomic_read(&vlocation->usage),
6081 + vlocation->vix.index,
6082 + vlocation->vldb.vid[0],
6083 + vlocation->vldb.vid[1],
6084 + vlocation->vldb.vid[2],
6085 + vlocation->vldb.name
6089 +} /* end afs_proc_cell_volumes_show() */
6091 +/*****************************************************************************/
6093 + * open "/proc/fs/afs/<cell>/vlservers" which provides a list of volume location server
6095 +static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file)
6097 + struct seq_file *m;
6101 + cell = afs_get_cell_maybe((afs_cell_t**)&PDE(inode)->data);
6105 + ret = seq_open(file,&afs_proc_cell_vlservers_ops);
6109 + m = file->private_data;
6110 + m->private = cell;
6113 +} /* end afs_proc_cell_vlservers_open() */
6115 +/*****************************************************************************/
6117 + * close the file and release the ref to the cell
6119 +static int afs_proc_cell_vlservers_release(struct inode *inode, struct file *file)
6121 + afs_cell_t *cell = PDE(inode)->data;
6124 + ret = seq_release(inode,file);
6126 + afs_put_cell(cell);
6128 +} /* end afs_proc_cell_vlservers_release() */
6130 +/*****************************************************************************/
6132 + * set up the iterator to start reading from the cells list and return the first item
6134 +static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
6136 + afs_cell_t *cell = m->private;
6137 + loff_t pos = *_pos;
6139 + _enter("cell=%p pos=%Ld",cell,*_pos);
6141 + /* lock the list against modification */
6142 + down_read(&cell->vl_sem);
6144 + /* allow for the header line */
6149 + if (pos>=cell->vl_naddrs)
6152 + return &cell->vl_addrs[pos];
6153 +} /* end afs_proc_cell_vlservers_start() */
6155 +/*****************************************************************************/
6157 + * move to next cell in cells list
6159 +static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, loff_t *_pos)
6161 + afs_cell_t *cell = p->private;
6164 + _enter("cell=%p{nad=%u} pos=%Ld",cell,cell->vl_naddrs,*_pos);
6168 + if (pos>=cell->vl_naddrs)
6171 + return &cell->vl_addrs[pos];
6172 +} /* end afs_proc_cell_vlservers_next() */
6174 +/*****************************************************************************/
6176 + * clean up after reading from the cells list
6178 +static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v)
6180 + afs_cell_t *cell = p->private;
6182 + up_read(&cell->vl_sem);
6184 +} /* end afs_proc_cell_vlservers_stop() */
6186 +/*****************************************************************************/
6188 + * display a header line followed by a load of volume lines
6190 +static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
6192 + struct in_addr *addr = v;
6194 + /* display header on line 1 */
6195 + if (v == (struct in_addr *)1) {
6196 + seq_puts(m,"ADDRESS\n");
6200 + /* display one cell per line on subsequent lines */
6201 + seq_printf(m,"%u.%u.%u.%u\n",NIPQUAD(addr->s_addr));
6204 +} /* end afs_proc_cell_vlservers_show() */
6206 +/*****************************************************************************/
6208 + * open "/proc/fs/afs/<cell>/servers" which provides a summary of active servers
6210 +static int afs_proc_cell_servers_open(struct inode *inode, struct file *file)
6212 + struct seq_file *m;
6216 + cell = afs_get_cell_maybe((afs_cell_t**)&PDE(inode)->data);
6220 + ret = seq_open(file,&afs_proc_cell_servers_ops);
6224 + m = file->private_data;
6225 + m->private = cell;
6228 +} /* end afs_proc_cell_servers_open() */
6230 +/*****************************************************************************/
6232 + * close the file and release the ref to the cell
6234 +static int afs_proc_cell_servers_release(struct inode *inode, struct file *file)
6236 + afs_cell_t *cell = PDE(inode)->data;
6239 + ret = seq_release(inode,file);
6241 + afs_put_cell(cell);
6243 +} /* end afs_proc_cell_servers_release() */
6245 +/*****************************************************************************/
6247 + * set up the iterator to start reading from the cells list and return the first item
6249 +static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos)
6251 + struct list_head *_p;
6252 + afs_cell_t *cell = m->private;
6253 + loff_t pos = *_pos;
6255 + _enter("cell=%p pos=%Ld",cell,*_pos);
6257 + /* lock the list against modification */
6258 + read_lock(&cell->sv_lock);
6260 + /* allow for the header line */
6265 + /* find the n'th element in the list */
6266 + list_for_each(_p,&cell->sv_list)
6270 + return _p!=&cell->sv_list ? _p : NULL;
6271 +} /* end afs_proc_cell_servers_start() */
6273 +/*****************************************************************************/
6275 + * move to next server in the cell's server list
6277 +static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, loff_t *_pos)
6279 + struct list_head *_p;
6280 + afs_cell_t *cell = p->private;
6282 + _enter("cell=%p pos=%Ld",cell,*_pos);
6287 + _p = v==(void*)1 ? cell->sv_list.next : _p->next;
6289 + return _p!=&cell->sv_list ? _p : NULL;
6290 +} /* end afs_proc_cell_servers_next() */
6292 +/*****************************************************************************/
6294 + * clean up after reading from the cell's server list
6296 +static void afs_proc_cell_servers_stop(struct seq_file *p, void *v)
6298 + afs_cell_t *cell = p->private;
6300 + read_unlock(&cell->sv_lock);
6302 +} /* end afs_proc_cell_servers_stop() */
6304 +/*****************************************************************************/
6306 + * display a header line followed by a load of server lines
6308 +static int afs_proc_cell_servers_show(struct seq_file *m, void *v)
6310 + afs_server_t *server = list_entry(v,afs_server_t,link);
6313 + /* display header on line 1 */
6314 + if (v == (void *)1) {
6315 + seq_puts(m, "USE ADDR STATE\n");
6319 +	/* display one server per line on subsequent lines */
6320 + sprintf(ipaddr,"%u.%u.%u.%u",NIPQUAD(server->addr));
6321 + seq_printf(m,"%3d %-15.15s %5d\n",
6322 + atomic_read(&server->usage),
6328 +} /* end afs_proc_cell_servers_show() */
6329 diff -urNp linux-5240/fs/afs/server.c linux-5250/fs/afs/server.c
6330 --- linux-5240/fs/afs/server.c 1970-01-01 01:00:00.000000000 +0100
6331 +++ linux-5250/fs/afs/server.c
6333 +/* server.c: AFS server record management
6335 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
6336 + * Written by David Howells (dhowells@redhat.com)
6338 + * This program is free software; you can redistribute it and/or
6339 + * modify it under the terms of the GNU General Public License
6340 + * as published by the Free Software Foundation; either version
6341 + * 2 of the License, or (at your option) any later version.
6344 +#include <linux/sched.h>
6345 +#include <linux/slab.h>
6346 +#include <rxrpc/peer.h>
6347 +#include <rxrpc/connection.h>
6348 +#include "volume.h"
6350 +#include "server.h"
6351 +#include "transport.h"
6353 +#include "vlclient.h"
6354 +#include "kafstimod.h"
6355 +#include "internal.h"
6357 +spinlock_t afs_server_peer_lock = SPIN_LOCK_UNLOCKED;
6359 +#define FS_SERVICE_ID		1	/* AFS File Service ID */
6360 +#define VL_SERVICE_ID 52 /* AFS Volume Location Service ID */
6362 +static void __afs_server_timeout(afs_timer_t *timer)
6364 + afs_server_t *server = list_entry(timer,afs_server_t,timeout);
6366 + _debug("SERVER TIMEOUT [%p{u=%d}]",server,atomic_read(&server->usage));
6368 + afs_server_do_timeout(server);
6371 +static const struct afs_timer_ops afs_server_timer_ops = {
6372 + timed_out: __afs_server_timeout,
6375 +/*****************************************************************************/
6377 + * lookup a server record in a cell
6378 + * - TODO: search the cell's server list
6380 +int afs_server_lookup(afs_cell_t *cell, const struct in_addr *addr, afs_server_t **_server)
6382 + struct list_head *_p;
6383 + afs_server_t *server, *active, *zombie;
6386 + _enter("%p,%08x,",cell,ntohl(addr->s_addr));
6388 + /* allocate and initialise a server record */
6389 + server = kmalloc(sizeof(afs_server_t),GFP_KERNEL);
6391 + _leave(" = -ENOMEM");
6395 + memset(server,0,sizeof(afs_server_t));
6396 + atomic_set(&server->usage,1);
6398 + INIT_LIST_HEAD(&server->link);
6399 + init_rwsem(&server->sem);
6400 + INIT_LIST_HEAD(&server->fs_callq);
6401 + spin_lock_init(&server->fs_lock);
6402 + INIT_LIST_HEAD(&server->cb_promises);
6403 + spin_lock_init(&server->cb_lock);
6405 + for (loop=0; loop<AFS_SERVER_CONN_LIST_SIZE; loop++)
6406 + server->fs_conn_cnt[loop] = 4;
6408 + memcpy(&server->addr,addr,sizeof(struct in_addr));
6409 + server->addr.s_addr = addr->s_addr;
6411 + afs_timer_init(&server->timeout,&afs_server_timer_ops);
6413 + /* add to the cell */
6414 + write_lock(&cell->sv_lock);
6416 + /* check the active list */
6417 + list_for_each(_p,&cell->sv_list) {
6418 + active = list_entry(_p,afs_server_t,link);
6420 + if (active->addr.s_addr==addr->s_addr)
6421 + goto use_active_server;
6424 + /* check the inactive list */
6425 + spin_lock(&cell->sv_gylock);
6426 + list_for_each(_p,&cell->sv_graveyard) {
6427 + zombie = list_entry(_p,afs_server_t,link);
6429 + if (zombie->addr.s_addr==addr->s_addr)
6430 + goto resurrect_server;
6432 + spin_unlock(&cell->sv_gylock);
6434 + afs_get_cell(cell);
6435 + server->cell = cell;
6436 + list_add_tail(&server->link,&cell->sv_list);
6438 + write_unlock(&cell->sv_lock);
6440 + *_server = server;
6441 + _leave(" = 0 (%p)",server);
6444 + /* found a matching active server */
6445 + use_active_server:
6446 + _debug("active server");
6447 + afs_get_server(active);
6448 + write_unlock(&cell->sv_lock);
6452 + *_server = active;
6453 + _leave(" = 0 (%p)",active);
6456 + /* found a matching server in the graveyard, so resurrect it and dispose of the new rec */
6458 + _debug("resurrecting server");
6460 + list_del(&zombie->link);
6461 + list_add_tail(&zombie->link,&cell->sv_list);
6462 + afs_get_server(zombie);
6463 + afs_kafstimod_del_timer(&zombie->timeout);
6464 + spin_unlock(&cell->sv_gylock);
6465 + write_unlock(&cell->sv_lock);
6469 + *_server = zombie;
6470 + _leave(" = 0 (%p)",zombie);
6473 +} /* end afs_server_lookup() */
6475 +/*****************************************************************************/
6477 + * destroy a server record
6478 + * - removes from the cell list
6480 +void afs_put_server(afs_server_t *server)
6484 + _enter("%p",server);
6486 + cell = server->cell;
6488 + /* sanity check */
6489 + if (atomic_read(&server->usage)<=0)
6492 + /* to prevent a race, the decrement and the dequeue must be effectively atomic */
6493 + write_lock(&cell->sv_lock);
6495 + if (likely(!atomic_dec_and_test(&server->usage))) {
6496 + write_unlock(&cell->sv_lock);
6501 + spin_lock(&cell->sv_gylock);
6502 + list_del(&server->link);
6503 + list_add_tail(&server->link,&cell->sv_graveyard);
6505 + /* time out in 10 secs */
6506 + afs_kafstimod_add_timer(&server->timeout,10*HZ);
6508 + spin_unlock(&cell->sv_gylock);
6509 + write_unlock(&cell->sv_lock);
6511 + _leave(" [killed]");
6512 +} /* end afs_put_server() */
6514 +/*****************************************************************************/
6516 + * timeout server record
6517 + * - removes from the cell's graveyard if the usage count is zero
6519 +void afs_server_do_timeout(afs_server_t *server)
6521 + struct rxrpc_peer *peer;
6525 + _enter("%p",server);
6527 + cell = server->cell;
6529 + if (atomic_read(&server->usage)<0) BUG();
6531 + /* remove from graveyard if still dead */
6532 + spin_lock(&cell->vl_gylock);
6533 + if (atomic_read(&server->usage)==0)
6534 + list_del_init(&server->link);
6537 + spin_unlock(&cell->vl_gylock);
6541 + return; /* resurrected */
6544 + /* we can now destroy it properly */
6545 + afs_put_cell(cell);
6547 + /* uncross-point the structs under a global lock */
6548 + spin_lock(&afs_server_peer_lock);
6549 + peer = server->peer;
6551 + server->peer = NULL;
6552 + peer->user = NULL;
6554 + spin_unlock(&afs_server_peer_lock);
6556 + /* finish cleaning up the server */
6557 + for (loop=AFS_SERVER_CONN_LIST_SIZE-1; loop>=0; loop--)
6558 + if (server->fs_conn[loop])
6559 + rxrpc_put_connection(server->fs_conn[loop]);
6561 + if (server->vlserver)
6562 + rxrpc_put_connection(server->vlserver);
6566 + _leave(" [destroyed]");
6567 +} /* end afs_server_do_timeout() */
6569 +/*****************************************************************************/
6571 + * get a callslot on a connection to the fileserver on the specified server
6573 +int afs_server_request_callslot(afs_server_t *server, struct afs_server_callslot *callslot)
6575 + struct afs_server_callslot *pcallslot;
6576 + struct rxrpc_connection *conn;
6579 + _enter("%p,",server);
6581 + INIT_LIST_HEAD(&callslot->link);
6582 + callslot->task = current;
6583 + callslot->conn = NULL;
6584 + callslot->nconn = -1;
6585 + callslot->ready = 0;
6590 + /* get hold of a callslot first */
6591 + spin_lock(&server->fs_lock);
6593 + /* resurrect the server if it's death timeout has expired */
6594 + if (server->fs_state) {
6595 + if (time_before(jiffies,server->fs_dead_jif)) {
6596 + ret = server->fs_state;
6597 + spin_unlock(&server->fs_lock);
6598 + _leave(" = %d [still dead]",ret);
6602 + server->fs_state = 0;
6605 + /* try and find a connection that has spare callslots */
6606 + for (nconn=0; nconn<AFS_SERVER_CONN_LIST_SIZE; nconn++) {
6607 + if (server->fs_conn_cnt[nconn]>0) {
6608 + server->fs_conn_cnt[nconn]--;
6609 + spin_unlock(&server->fs_lock);
6610 + callslot->nconn = nconn;
6611 + goto obtained_slot;
6615 + /* none were available - wait interruptibly for one to become available */
6616 + set_current_state(TASK_INTERRUPTIBLE);
6617 + list_add_tail(&callslot->link,&server->fs_callq);
6618 + spin_unlock(&server->fs_lock);
6620 + while (!callslot->ready && !signal_pending(current)) {
6622 + set_current_state(TASK_INTERRUPTIBLE);
6625 + set_current_state(TASK_RUNNING);
6627 + /* even if we were interrupted we may still be queued */
6628 + if (!callslot->ready) {
6629 + spin_lock(&server->fs_lock);
6630 + list_del_init(&callslot->link);
6631 + spin_unlock(&server->fs_lock);
6634 + nconn = callslot->nconn;
6636 + /* if interrupted, we must release any slot we also got before returning an error */
6637 + if (signal_pending(current)) {
6639 + goto error_release;
6642 + /* if we were woken up with an error, then pass that error back to the called */
6644 + _leave(" = %d",callslot->errno);
6645 + return callslot->errno;
6648 + /* were we given a connection directly? */
6649 + if (callslot->conn) {
6650 + /* yes - use it */
6651 + _leave(" = 0 (nc=%d)",nconn);
6655 + /* got a callslot, but no connection */
6658 + /* need to get hold of the RxRPC connection */
6659 + down_write(&server->sem);
6661 + /* quick check to see if there's an outstanding error */
6662 + ret = server->fs_state;
6664 + goto error_release_upw;
6666 + if (server->fs_conn[nconn]) {
6667 + /* reuse an existing connection */
6668 + rxrpc_get_connection(server->fs_conn[nconn]);
6669 + callslot->conn = server->fs_conn[nconn];
6672 + /* create a new connection */
6673 + ret = rxrpc_create_connection(afs_transport,
6675 + server->addr.s_addr,
6678 + &server->fs_conn[nconn]);
6681 + goto error_release_upw;
6683 + callslot->conn = server->fs_conn[0];
6684 + rxrpc_get_connection(callslot->conn);
6687 + up_write(&server->sem);
6692 + /* handle an error occurring */
6693 + error_release_upw:
6694 + up_write(&server->sem);
6697 + /* either release the callslot or pass it along to another deserving task */
6698 + spin_lock(&server->fs_lock);
6701 + /* no callslot allocated */
6703 + else if (list_empty(&server->fs_callq)) {
6704 + /* no one waiting */
6705 + server->fs_conn_cnt[nconn]++;
6706 + spin_unlock(&server->fs_lock);
6709 + /* someone's waiting - dequeue them and wake them up */
6710 + pcallslot = list_entry(server->fs_callq.next,struct afs_server_callslot,link);
6711 + list_del_init(&pcallslot->link);
6713 + pcallslot->errno = server->fs_state;
6714 + if (!pcallslot->errno) {
6715 + /* pass them out callslot details */
6716 + callslot->conn = xchg(&pcallslot->conn,callslot->conn);
6717 + pcallslot->nconn = nconn;
6718 + callslot->nconn = nconn = -1;
6720 + pcallslot->ready = 1;
6721 + wake_up_process(pcallslot->task);
6722 + spin_unlock(&server->fs_lock);
6725 + if (callslot->conn) rxrpc_put_connection(callslot->conn);
6726 + callslot->conn = NULL;
6728 + _leave(" = %d",ret);
6731 +} /* end afs_server_request_callslot() */
6733 +/*****************************************************************************/
6735 + * release a callslot back to the server
6736 + * - transfers the RxRPC connection to the next pending callslot if possible
6738 +void afs_server_release_callslot(afs_server_t *server, struct afs_server_callslot *callslot)
6740 + struct afs_server_callslot *pcallslot;
6742 + _enter("{ad=%08x,cnt=%u},{%d}",
6743 + ntohl(server->addr.s_addr),
6744 + server->fs_conn_cnt[callslot->nconn],
6747 + if (callslot->nconn<0) BUG();
6749 + spin_lock(&server->fs_lock);
6751 + if (list_empty(&server->fs_callq)) {
6752 + /* no one waiting */
6753 + server->fs_conn_cnt[callslot->nconn]++;
6754 + spin_unlock(&server->fs_lock);
6757 + /* someone's waiting - dequeue them and wake them up */
6758 + pcallslot = list_entry(server->fs_callq.next,struct afs_server_callslot,link);
6759 + list_del_init(&pcallslot->link);
6761 + pcallslot->errno = server->fs_state;
6762 + if (!pcallslot->errno) {
6763 + /* pass them out callslot details */
6764 + callslot->conn = xchg(&pcallslot->conn,callslot->conn);
6765 + pcallslot->nconn = callslot->nconn;
6766 + callslot->nconn = -1;
6769 + pcallslot->ready = 1;
6770 + wake_up_process(pcallslot->task);
6771 + spin_unlock(&server->fs_lock);
6774 + if (callslot->conn) rxrpc_put_connection(callslot->conn);
6777 +} /* end afs_server_release_callslot() */
6779 +/*****************************************************************************/
6781 + * get a handle to a connection to the vlserver (volume location) on the specified server
6783 +int afs_server_get_vlconn(afs_server_t *server, struct rxrpc_connection **_conn)
6785 + struct rxrpc_connection *conn;
6788 + _enter("%p,",server);
6792 + down_read(&server->sem);
6794 + if (server->vlserver) {
6795 + /* reuse an existing connection */
6796 + rxrpc_get_connection(server->vlserver);
6797 + conn = server->vlserver;
6798 + up_read(&server->sem);
6801 + /* create a new connection */
6802 + up_read(&server->sem);
6803 + down_write(&server->sem);
6804 + if (!server->vlserver) {
6805 + ret = rxrpc_create_connection(afs_transport,
6807 + server->addr.s_addr,
6810 + &server->vlserver);
6813 + rxrpc_get_connection(server->vlserver);
6814 + conn = server->vlserver;
6816 + up_write(&server->sem);
6820 + _leave(" = %d",ret);
6822 +} /* end afs_server_get_vlconn() */
6823 diff -urNp linux-5240/fs/afs/server.h linux-5250/fs/afs/server.h
6824 --- linux-5240/fs/afs/server.h 1970-01-01 01:00:00.000000000 +0100
6825 +++ linux-5250/fs/afs/server.h
6827 +/* server.h: AFS server record
6829 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
6830 + * Written by David Howells (dhowells@redhat.com)
6832 + * This program is free software; you can redistribute it and/or
6833 + * modify it under the terms of the GNU General Public License
6834 + * as published by the Free Software Foundation; either version
6835 + * 2 of the License, or (at your option) any later version.
6838 +#ifndef _LINUX_AFS_SERVER_H
6839 +#define _LINUX_AFS_SERVER_H
6842 +#include "kafstimod.h"
6843 +#include <rxrpc/peer.h>
6844 +#include <linux/rwsem.h>
6846 +extern spinlock_t afs_server_peer_lock;
6848 +/*****************************************************************************/
6850 + * AFS server record
6855 + afs_cell_t *cell; /* cell in which server resides */
6856 + struct list_head link; /* link in cell's server list */
6857 + struct rw_semaphore sem; /* access lock */
6858 + afs_timer_t timeout; /* graveyard timeout */
6859 + struct in_addr addr; /* server address */
6860 + struct rxrpc_peer *peer; /* peer record for this server */
6861 + struct rxrpc_connection *vlserver; /* connection to the volume location service */
6863 + /* file service access */
6864 +#define AFS_SERVER_CONN_LIST_SIZE 2
6865 + struct rxrpc_connection *fs_conn[AFS_SERVER_CONN_LIST_SIZE]; /* FS connections */
6866 + unsigned fs_conn_cnt[AFS_SERVER_CONN_LIST_SIZE]; /* per conn call count */
6867 + struct list_head fs_callq; /* queue of processes waiting to make a call */
6868 + spinlock_t fs_lock; /* access lock */
6869 + int fs_state; /* 0 or reason FS currently marked dead (-errno) */
6870 + unsigned fs_rtt; /* FS round trip time */
6871 + unsigned long fs_act_jif; /* time at which last activity occurred */
6872 + unsigned long fs_dead_jif; /* time at which no longer to be considered dead */
6874 + /* callback promise management */
6875 + struct list_head cb_promises; /* as yet unbroken promises from this server */
6876 + spinlock_t cb_lock; /* access lock */
6879 +extern int afs_server_lookup(afs_cell_t *cell, const struct in_addr *addr, afs_server_t **_server);
6881 +#define afs_get_server(S) do { atomic_inc(&(S)->usage); } while(0)
6883 +extern void afs_put_server(afs_server_t *server);
6884 +extern void afs_server_do_timeout(afs_server_t *server);
6886 +extern int afs_server_find_by_peer(const struct rxrpc_peer *peer, afs_server_t **_server);
6888 +extern int afs_server_get_vlconn(afs_server_t *server, struct rxrpc_connection **_conn);
6890 +static inline afs_server_t *afs_server_get_from_peer(struct rxrpc_peer *peer)
6892 + afs_server_t *server;
6894 + spin_lock(&afs_server_peer_lock);
6895 + server = peer->user;
6897 + afs_get_server(server);
6898 + spin_unlock(&afs_server_peer_lock);
6903 +/*****************************************************************************/
6905 + * AFS server callslot grant record
6907 +struct afs_server_callslot
6909 + struct list_head link; /* link in server's list */
6910 + struct task_struct *task; /* process waiting to make call */
6911 + struct rxrpc_connection *conn; /* connection to use (or NULL on error) */
6912 + short nconn; /* connection slot number (-1 on error) */
6913 + char ready; /* T when ready */
6914 + int errno; /* error number if nconn==-1 */
6917 +extern int afs_server_request_callslot(afs_server_t *server,
6918 + struct afs_server_callslot *callslot);
6920 +extern void afs_server_release_callslot(afs_server_t *server,
6921 + struct afs_server_callslot *callslot);
6923 +#endif /* _LINUX_AFS_SERVER_H */
6924 diff -urNp linux-5240/fs/afs/super.c linux-5250/fs/afs/super.c
6925 --- linux-5240/fs/afs/super.c 1970-01-01 01:00:00.000000000 +0100
6926 +++ linux-5250/fs/afs/super.c
6929 + * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
6931 + * This software may be freely redistributed under the terms of the
6932 + * GNU General Public License.
6934 + * You should have received a copy of the GNU General Public License
6935 + * along with this program; if not, write to the Free Software
6936 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
6938 + * Authors: David Howells <dhowells@redhat.com>
6939 + * David Woodhouse <dwmw2@cambridge.redhat.com>
6943 +#include <linux/kernel.h>
6944 +#include <linux/module.h>
6945 +#include <linux/init.h>
6946 +#include <linux/slab.h>
6947 +#include <linux/fs.h>
6948 +#include <linux/pagemap.h>
6950 +#include "volume.h"
6952 +#include "cmservice.h"
6953 +#include "fsclient.h"
6956 +#include "internal.h"
6958 +#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */
6960 +static inline char *strdup(const char *s)
6962 + char *ns = kmalloc(strlen(s)+1,GFP_KERNEL);
6968 +static void afs_i_init_once(void *foo, kmem_cache_t *cachep, unsigned long flags);
6970 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
6971 +static struct super_block *afs_get_sb(struct file_system_type *fs_type,
6972 + int flags, char *dev_name, void *data);
6974 +static struct super_block *afs_read_super(struct super_block *sb, void *data, int);
6975 +static void afs_put_inode(struct inode *inode);
6978 +static struct inode *afs_alloc_inode(struct super_block *sb);
6980 +static void afs_put_super(struct super_block *sb);
6982 +static void afs_destroy_inode(struct inode *inode);
6984 +static struct file_system_type afs_fs_type = {
6985 + owner: THIS_MODULE,
6987 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
6988 + get_sb: afs_get_sb,
6989 + kill_sb: kill_anon_super,
6991 + read_super: afs_read_super,
6995 +static struct super_operations afs_super_ops = {
6996 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
6997 + statfs: simple_statfs,
6998 + alloc_inode: afs_alloc_inode,
6999 + drop_inode: generic_delete_inode,
7000 + destroy_inode: afs_destroy_inode,
7002 + put_inode: afs_put_inode,
7003 + read_inode2: afs_read_inode2,
7005 + clear_inode: afs_clear_inode,
7006 + put_super: afs_put_super,
7009 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7010 +static kmem_cache_t *afs_inode_cachep;
7014 +static const char *cachedev;
7016 +static afs_cache_t *afs_cache;
7018 +/*****************************************************************************/
7020 + * initialise the filesystem
7022 +int __init afs_fs_init(void)
7026 + /* open the cache */
7030 + printk(KERN_NOTICE "kAFS: No cache device specified as module parm\n");
7031 + printk(KERN_NOTICE "kAFS: Set with \"cachedev=<devname>\" on insmod's cmdline\n");
7035 + ret = afs_cache_open(cachedev,&afs_cache);
7037 + printk(KERN_NOTICE "kAFS: Failed to open cache device\n");
7042 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7043 + /* create ourselves an inode cache */
7045 + afs_inode_cachep = kmem_cache_create("afs_inode_cache",
7046 + sizeof(afs_vnode_t),
7048 + SLAB_HWCACHE_ALIGN,
7051 + if (!afs_inode_cachep) {
7052 + printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
7054 + afs_put_cache(afs_cache);
7060 + /* now export our filesystem to lesser mortals */
7061 + ret = register_filesystem(&afs_fs_type);
7063 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7064 + kmem_cache_destroy(afs_inode_cachep);
7067 + afs_put_cache(afs_cache);
7073 +} /* end afs_fs_init() */
7075 +/*****************************************************************************/
7077 + * clean up the filesystem
7079 +void __exit afs_fs_exit(void)
7081 + /* destroy our private inode cache */
7082 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7083 + kmem_cache_destroy(afs_inode_cachep);
7086 + unregister_filesystem(&afs_fs_type);
7090 + afs_put_cache(afs_cache);
7092 +} /* end afs_fs_exit() */
7094 +/*****************************************************************************/
7096 + * check that an argument has a value
7098 +static int want_arg(char **_value, const char *option)
7100 + if (!_value || !*_value || !**_value) {
7101 + printk(KERN_NOTICE "kAFS: %s: argument missing\n",option);
7105 +} /* end want_arg() */
7107 +/*****************************************************************************/
7109 + * check that there is a value
7112 +static int want_value(char **_value, const char *option)
7114 + if (!_value || !*_value || !**_value) {
7115 + printk(KERN_NOTICE "kAFS: %s: argument incomplete\n",option);
7119 +} /* end want_value() */
7122 +/*****************************************************************************/
7124 + * check that there's no subsequent value
7126 +static int want_no_value(char *const *_value, const char *option)
7128 + if (*_value && **_value) {
7129 + printk(KERN_NOTICE "kAFS: %s: Invalid argument: %s\n",option,*_value);
7133 +} /* end want_no_value() */
7135 +/*****************************************************************************/
7137 + * extract a number from an option string value
7140 +static int want_number(char **_value, const char *option, unsigned long *number,
7141 + unsigned long limit)
7143 + char *value = *_value;
7145 + if (!want_value(_value,option))
7148 + *number = simple_strtoul(value,_value,0);
7150 + if (value==*_value) {
7151 + printk(KERN_NOTICE "kAFS: %s: Invalid number: %s\n",option,value);
7155 + if (*number>limit) {
7156 + printk(KERN_NOTICE "kAFS: %s: numeric value %lu > %lu\n",option,*number,limit);
7161 +} /* end want_number() */
7164 +/*****************************************************************************/
7166 + * extract a separator from an option string value
7169 +static int want_sep(char **_value, const char *option, char sep)
7171 + if (!want_value(_value,option))
7174 + if (*(*_value)++ != sep) {
7175 + printk(KERN_NOTICE "kAFS: %s: '%c' expected: %s\n",option,sep,*_value-1);
7180 +} /* end want_number() */
7183 +/*****************************************************************************/
7185 + * extract an IP address from an option string value
7188 +static int want_ipaddr(char **_value, const char *option, struct in_addr *addr)
7190 + unsigned long number[4];
7192 + if (!want_value(_value,option))
7195 + if (!want_number(_value,option,&number[0],255) ||
7196 + !want_sep(_value,option,'.') ||
7197 + !want_number(_value,option,&number[1],255) ||
7198 + !want_sep(_value,option,'.') ||
7199 + !want_number(_value,option,&number[2],255) ||
7200 + !want_sep(_value,option,'.') ||
7201 + !want_number(_value,option,&number[3],255))
7204 + ((u8*)addr)[0] = number[0];
7205 + ((u8*)addr)[1] = number[1];
7206 + ((u8*)addr)[2] = number[2];
7207 + ((u8*)addr)[3] = number[3];
7210 +} /* end want_numeric() */
7213 +/*****************************************************************************/
7215 + * parse the mount options
7216 + * - this function has been shamelessly adapted from the ext3 fs which shamelessly adapted it from
7219 +static int afs_super_parse_options(struct afs_super_info *as, char *options, char **devname)
7221 + char *key, *value;
7224 + kenter("%s",options);
7227 + while ((key = strsep(&options,",")))
7229 + value = strchr(key,'=');
7233 + printk("kAFS: KEY: %s, VAL:%s\n",key,value?:"-");
7235 + if (strcmp(key,"rwpath")==0) {
7236 + if (!want_no_value(&value,"rwpath")) return -EINVAL;
7240 + else if (strcmp(key,"vol")==0) {
7241 + if (!want_arg(&value,"vol")) return -EINVAL;
7247 + if (strcmp(key,"servers")==0) {
7248 + if (!want_arg(&value,"servers")) return -EINVAL;
7250 + _debug("servers=%s",value);
7253 + struct in_addr addr;
7255 + if (!want_ipaddr(&value,"servers",&addr))
7258 + ret = afs_create_server(as->cell,&addr,&as->server);
7260 + printk("kAFS: unable to create server: %d\n",ret);
7268 + printk(KERN_NOTICE
7269 + "kAFS: only one server can be specified\n");
7273 + if (!want_sep(&value,"servers",':'))
7280 + printk("kAFS: Unknown mount option: '%s'\n",key);
7288 + kleave(" = %d",ret);
7291 +} /* end afs_super_parse_options() */
7293 +/*****************************************************************************/
7295 + * fill in the superblock
7297 +static int afs_fill_super(struct super_block *sb, void *_data, int silent)
7299 + struct afs_super_info *as = NULL;
7300 + struct dentry *root = NULL;
7301 + struct inode *inode = NULL;
7303 + void **data = _data;
7304 + char *options, *devname;
7310 + kleave(" = -EINVAL");
7313 + devname = data[0];
7314 + options = data[1];
7316 + options[PAGE_SIZE-1] = 0;
7318 + /* allocate a superblock info record */
7319 + as = kmalloc(sizeof(struct afs_super_info),GFP_KERNEL);
7321 + kleave(" = -ENOMEM");
7325 + memset(as,0,sizeof(struct afs_super_info));
7327 + /* parse the options */
7329 + ret = afs_super_parse_options(as,options,&devname);
7333 + printk("kAFS: no volume name specified\n");
7339 + /* parse the device name */
7340 + ret = afs_volume_lookup(afs_cache,devname,as->rwparent,&as->volume);
7344 + /* fill in the superblock */
7345 + sb->s_blocksize = PAGE_CACHE_SIZE;
7346 + sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
7347 + sb->s_magic = AFS_FS_MAGIC;
7348 + sb->s_op = &afs_super_ops;
7349 + sb->u.generic_sbp = as;
7351 + /* allocate the root inode and dentry */
7352 + fid.vid = as->volume->vid;
7355 + ret = afs_iget(sb,&fid,&inode);
7360 + root = d_alloc_root(inode);
7364 + sb->s_root = root;
7370 + if (root) dput(root);
7371 + if (inode) iput(inode);
7373 + if (as->volume) afs_put_volume(as->volume);
7376 + sb->u.generic_sbp = NULL;
7378 + kleave(" = %d",ret);
7380 +} /* end afs_fill_super() */
7382 +/*****************************************************************************/
7384 + * get an AFS superblock
7385 + * - TODO: don't use get_sb_nodev(), but rather call sget() directly
7387 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7388 +static struct super_block *afs_get_sb(struct file_system_type *fs_type,
7393 + struct super_block *sb;
7394 + void *data[2] = { dev_name, options };
7397 + _enter(",,%s,%p",dev_name,options);
7399 + /* start the cache manager */
7400 + ret = afscm_start();
7402 + _leave(" = %d",ret);
7403 + return ERR_PTR(ret);
7406 + /* allocate a deviceless superblock */
7407 + sb = get_sb_nodev(fs_type,flags,data,afs_fill_super);
7415 +} /* end afs_get_sb() */
7418 +/*****************************************************************************/
7420 + * read an AFS superblock
7422 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
7423 +static struct super_block *afs_read_super(struct super_block *sb, void *options, int silent)
7425 + void *data[2] = { NULL, options };
7428 + kenter(",,%s",(char*)options);
7430 + /* start the cache manager */
7431 + ret = afscm_start();
7433 + kleave(" = NULL (%d)",ret);
7437 + /* allocate a deviceless superblock */
7438 + ret = afs_fill_super(sb,data,silent);
7441 + kleave(" = NULL (%d)",ret);
7445 + kleave(" = %p",sb);
7447 +} /* end afs_read_super() */
7450 +/*****************************************************************************/
7452 + * finish the unmounting process on the superblock
7454 +static void afs_put_super(struct super_block *sb)
7456 + struct afs_super_info *as = sb->u.generic_sbp;
7461 + if (as->volume) afs_put_volume(as->volume);
7464 + /* stop the cache manager */
7468 +} /* end afs_put_super() */
7470 +/*****************************************************************************/
7472 + * initialise an inode cache slab element prior to any use
7474 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7475 +static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep, unsigned long flags)
7477 + afs_vnode_t *vnode = (afs_vnode_t *) _vnode;
7479 + if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) {
7480 + memset(vnode,0,sizeof(*vnode));
7481 + inode_init_once(&vnode->vfs_inode);
7482 + init_waitqueue_head(&vnode->update_waitq);
7483 + spin_lock_init(&vnode->lock);
7484 + INIT_LIST_HEAD(&vnode->cb_link);
7485 + INIT_LIST_HEAD(&vnode->cb_hash_link);
7486 + afs_timer_init(&vnode->cb_timeout,&afs_vnode_cb_timed_out_ops);
7489 +} /* end afs_i_init_once() */
7492 +/*****************************************************************************/
7494 + * allocate an AFS inode struct from our slab cache
7496 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7497 +static struct inode *afs_alloc_inode(struct super_block *sb)
7499 + afs_vnode_t *vnode;
7501 + vnode = (afs_vnode_t *) kmem_cache_alloc(afs_inode_cachep,SLAB_KERNEL);
7505 + memset(&vnode->fid,0,sizeof(vnode->fid));
7506 + memset(&vnode->status,0,sizeof(vnode->status));
7508 + vnode->volume = NULL;
7509 + vnode->update_cnt = 0;
7512 + return &vnode->vfs_inode;
7513 +} /* end afs_alloc_inode() */
7516 +/*****************************************************************************/
7520 +static void afs_put_inode(struct inode *inode)
7522 + if (inode->u.generic_ip) kfree(inode->u.generic_ip);
7524 +} /* end afs_put_inode() */
7526 +/*****************************************************************************/
7528 + * destroy an AFS inode struct
7530 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
7531 +static void afs_destroy_inode(struct inode *inode)
7533 + _enter("{%lu}",inode->i_ino);
7534 + kmem_cache_free(afs_inode_cachep, AFS_FS_I(inode));
7535 +} /* end afs_destroy_inode() */
7537 diff -urNp linux-5240/fs/afs/super.h linux-5250/fs/afs/super.h
7538 --- linux-5240/fs/afs/super.h 1970-01-01 01:00:00.000000000 +0100
7539 +++ linux-5250/fs/afs/super.h
7541 +/* super.h: AFS filesystem internal private data
7543 + * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
7545 + * This software may be freely redistributed under the terms of the
7546 + * GNU General Public License.
7548 + * You should have received a copy of the GNU General Public License
7549 + * along with this program; if not, write to the Free Software
7550 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
7552 + * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
7553 + * David Howells <dhowells@redhat.com>
7557 +#ifndef _LINUX_AFS_SUPER_H
7558 +#define _LINUX_AFS_SUPER_H
7560 +#include <linux/fs.h>
7561 +#include "server.h"
7565 +/*****************************************************************************/
7567 + * AFS superblock private data
7568 + * - there's one superblock per volume
7570 +struct afs_super_info
7572 + afs_volume_t *volume; /* volume record */
7573 + char rwparent; /* T if parent is R/W AFS volume */
7576 +static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
7578 + return sb->u.generic_sbp;
7581 +#endif /* __KERNEL__ */
7583 +#endif /* _LINUX_AFS_SUPER_H */
7584 diff -urNp linux-5240/fs/afs/transport.h linux-5250/fs/afs/transport.h
7585 --- linux-5240/fs/afs/transport.h 1970-01-01 01:00:00.000000000 +0100
7586 +++ linux-5250/fs/afs/transport.h
7588 +/* transport.h: AFS transport management
7590 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
7591 + * Written by David Howells (dhowells@redhat.com)
7593 + * This program is free software; you can redistribute it and/or
7594 + * modify it under the terms of the GNU General Public License
7595 + * as published by the Free Software Foundation; either version
7596 + * 2 of the License, or (at your option) any later version.
7599 +#ifndef _LINUX_AFS_TRANSPORT_H
7600 +#define _LINUX_AFS_TRANSPORT_H
7603 +#include <rxrpc/transport.h>
7605 +/* the cache manager transport endpoint */
7606 +extern struct rxrpc_transport *afs_transport;
7608 +#endif /* _LINUX_AFS_TRANSPORT_H */
7609 diff -urNp linux-5240/fs/afs/types.h linux-5250/fs/afs/types.h
7610 --- linux-5240/fs/afs/types.h 1970-01-01 01:00:00.000000000 +0100
7611 +++ linux-5250/fs/afs/types.h
7613 +/* types.h: AFS types
7615 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
7616 + * Written by David Howells (dhowells@redhat.com)
7618 + * This program is free software; you can redistribute it and/or
7619 + * modify it under the terms of the GNU General Public License
7620 + * as published by the Free Software Foundation; either version
7621 + * 2 of the License, or (at your option) any later version.
7624 +#ifndef _LINUX_AFS_TYPES_H
7625 +#define _LINUX_AFS_TYPES_H
7628 +#include <rxrpc/types.h>
7629 +#endif /* __KERNEL__ */
7631 +typedef unsigned afs_volid_t;
7632 +typedef unsigned afs_vnodeid_t;
7633 +typedef unsigned long long afs_dataversion_t;
7635 +typedef struct afs_async_op afs_async_op_t;
7636 +typedef struct afs_cache afs_cache_t;
7637 +typedef struct afs_cache_volindex afs_cache_volindex_t;
7638 +typedef struct afs_callback afs_callback_t;
7639 +typedef struct afs_cell afs_cell_t;
7640 +typedef struct afs_fid afs_fid_t;
7641 +typedef struct afs_file_status afs_file_status_t;
7642 +typedef struct afs_server afs_server_t;
7643 +typedef struct afs_timer afs_timer_t;
7644 +typedef struct afs_vlocation afs_vlocation_t;
7645 +typedef struct afs_vnode afs_vnode_t;
7646 +typedef struct afs_volsync afs_volsync_t;
7647 +typedef struct afs_volume afs_volume_t;
7648 +typedef struct afs_volume_info afs_volume_info_t;
7650 +typedef struct afsvl_dbentry afsvl_dbentry_t;
7653 + AFSVL_RWVOL, /* read/write volume */
7654 + AFSVL_ROVOL, /* read-only volume */
7655 + AFSVL_BACKVOL, /* backup volume */
7658 +extern const char *afs_voltypes[];
7661 + AFS_FTYPE_INVALID = 0,
7662 + AFS_FTYPE_FILE = 1,
7663 + AFS_FTYPE_DIR = 2,
7664 + AFS_FTYPE_SYMLINK = 3,
7669 +/*****************************************************************************/
7671 + * AFS file identifier
7675 + afs_volid_t vid; /* volume ID */
7676 + afs_vnodeid_t vnode; /* file index within volume */
7677 + unsigned unique; /* unique ID number (file index version) */
7680 +/*****************************************************************************/
7682 + * AFS callback notification
7685 + AFSCM_CB_UNTYPED = 0, /* no type set on CB break */
7686 + AFSCM_CB_EXCLUSIVE = 1, /* CB exclusive to CM [not implemented] */
7687 + AFSCM_CB_SHARED = 2, /* CB shared by other CM's */
7688 + AFSCM_CB_DROPPED = 3, /* CB promise cancelled by file server */
7689 +} afs_callback_type_t;
7691 +struct afs_callback
7693 + afs_server_t *server; /* server that made the promise */
7694 + afs_fid_t fid; /* file identifier */
7695 + unsigned version; /* callback version */
7696 + unsigned expiry; /* time at which expires */
7697 + afs_callback_type_t type; /* type of callback */
7700 +#define AFSCBMAX 50
7702 +/*****************************************************************************/
7704 + * AFS volume information
7706 +struct afs_volume_info
7708 + afs_volid_t vid; /* volume ID */
7709 + afs_voltype_t type; /* type of this volume */
7710 +	afs_volid_t		type_vids[5];	/* volume IDs for possible types for this vol */
7712 + /* list of fileservers serving this volume */
7713 + size_t nservers; /* number of entries used in servers[] */
7715 + struct in_addr addr; /* fileserver address */
7719 +/*****************************************************************************/
7721 + * AFS file status information
7723 +struct afs_file_status
7725 + unsigned if_version; /* interface version */
7726 +#define AFS_FSTATUS_VERSION 1
7728 + afs_file_type_t type; /* file type */
7729 + unsigned nlink; /* link count */
7730 + size_t size; /* file size */
7731 + afs_dataversion_t version; /* current data version */
7732 + unsigned author; /* author ID */
7733 + unsigned owner; /* owner ID */
7734 + unsigned caller_access; /* access rights for authenticated caller */
7735 + unsigned anon_access; /* access rights for unauthenticated caller */
7736 + umode_t mode; /* UNIX mode */
7737 + afs_fid_t parent; /* parent file ID */
7738 + time_t mtime_client; /* last time client changed data */
7739 + time_t mtime_server; /* last time server changed data */
7742 +/*****************************************************************************/
7744 + * AFS volume synchronisation information
7748 + time_t creation; /* volume creation time */
7751 +#endif /* __KERNEL__ */
7753 +#endif /* _LINUX_AFS_TYPES_H */
7754 diff -urNp linux-5240/fs/afs/vlclient.c linux-5250/fs/afs/vlclient.c
7755 --- linux-5240/fs/afs/vlclient.c 1970-01-01 01:00:00.000000000 +0100
7756 +++ linux-5250/fs/afs/vlclient.c
7758 +/* vlclient.c: AFS Volume Location Service client
7760 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
7761 + * Written by David Howells (dhowells@redhat.com)
7763 + * This program is free software; you can redistribute it and/or
7764 + * modify it under the terms of the GNU General Public License
7765 + * as published by the Free Software Foundation; either version
7766 + * 2 of the License, or (at your option) any later version.
7769 +#include <linux/init.h>
7770 +#include <linux/sched.h>
7771 +#include <rxrpc/rxrpc.h>
7772 +#include <rxrpc/transport.h>
7773 +#include <rxrpc/connection.h>
7774 +#include <rxrpc/call.h>
7775 +#include "server.h"
7776 +#include "vlclient.h"
7777 +#include "kafsasyncd.h"
7778 +#include "kafstimod.h"
7779 +#include "errors.h"
7780 +#include "internal.h"
7782 +#define VLGETENTRYBYID 503 /* AFS Get Cache Entry By ID operation ID */
7783 +#define VLGETENTRYBYNAME 504 /* AFS Get Cache Entry By Name operation ID */
7784 +#define VLPROBE 514 /* AFS Probe Volume Location Service operation ID */
7786 +static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call);
7787 +static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call);
7789 +/*****************************************************************************/
7791 + * map afs VL abort codes to/from Linux error codes
7792 + * - called with call->lock held
7794 +static void afs_rxvl_aemap(struct rxrpc_call *call)
7798 + _enter("{%u,%u,%d}",call->app_err_state,call->app_abort_code,call->app_errno);
7800 + switch (call->app_err_state) {
7801 + case RXRPC_ESTATE_LOCAL_ABORT:
7802 + call->app_abort_code = -call->app_errno;
7805 + case RXRPC_ESTATE_PEER_ABORT:
7806 + switch (call->app_abort_code) {
7807 + case AFSVL_IDEXIST: err = -EEXIST; break;
7808 + case AFSVL_IO: err = -EREMOTEIO; break;
7809 + case AFSVL_NAMEEXIST: err = -EEXIST; break;
7810 + case AFSVL_CREATEFAIL: err = -EREMOTEIO; break;
7811 + case AFSVL_NOENT: err = -ENOMEDIUM; break;
7812 + case AFSVL_EMPTY: err = -ENOMEDIUM; break;
7813 + case AFSVL_ENTDELETED: err = -ENOMEDIUM; break;
7814 + case AFSVL_BADNAME: err = -EINVAL; break;
7815 + case AFSVL_BADINDEX: err = -EINVAL; break;
7816 + case AFSVL_BADVOLTYPE: err = -EINVAL; break;
7817 + case AFSVL_BADSERVER: err = -EINVAL; break;
7818 + case AFSVL_BADPARTITION: err = -EINVAL; break;
7819 + case AFSVL_REPSFULL: err = -EFBIG; break;
7820 + case AFSVL_NOREPSERVER: err = -ENOENT; break;
7821 + case AFSVL_DUPREPSERVER: err = -EEXIST; break;
7822 + case AFSVL_RWNOTFOUND: err = -ENOENT; break;
7823 + case AFSVL_BADREFCOUNT: err = -EINVAL; break;
7824 + case AFSVL_SIZEEXCEEDED: err = -EINVAL; break;
7825 + case AFSVL_BADENTRY: err = -EINVAL; break;
7826 + case AFSVL_BADVOLIDBUMP: err = -EINVAL; break;
7827 + case AFSVL_IDALREADYHASHED: err = -EINVAL; break;
7828 + case AFSVL_ENTRYLOCKED: err = -EBUSY; break;
7829 + case AFSVL_BADVOLOPER: err = -EBADRQC; break;
7830 + case AFSVL_BADRELLOCKTYPE: err = -EINVAL; break;
7831 + case AFSVL_RERELEASE: err = -EREMOTEIO; break;
7832 + case AFSVL_BADSERVERFLAG: err = -EINVAL; break;
7833 + case AFSVL_PERM: err = -EACCES; break;
7834 + case AFSVL_NOMEM: err = -EREMOTEIO; break;
7836 + err = afs_abort_to_error(call->app_abort_code);
7839 + call->app_errno = err;
7845 +} /* end afs_rxvl_aemap() */
7847 +/*****************************************************************************/
7849 + * probe a volume location server to see if it is still alive
7851 +int afs_rxvl_probe(afs_server_t *server, int alloc_flags)
7853 + DECLARE_WAITQUEUE(myself,current);
7855 + struct rxrpc_connection *conn;
7856 + struct rxrpc_call *call;
7857 + struct iovec piov[1];
7862 + /* get hold of the vlserver connection */
7863 + ret = afs_server_get_vlconn(server,&conn);
7867 + /* create a call through that connection */
7868 + ret = rxrpc_create_call(conn,NULL,NULL,afs_rxvl_aemap,&call);
7870 + printk("kAFS: Unable to create call: %d\n",ret);
7871 + goto out_put_conn;
7873 + call->app_opcode = VLPROBE;
7875 + /* we want to get event notifications from the call */
7876 + add_wait_queue(&call->waitq,&myself);
7878 + /* marshall the parameters */
7879 + param[0] = htonl(VLPROBE);
7880 + piov[0].iov_len = sizeof(param);
7881 + piov[0].iov_base = param;
7883 + /* send the parameters to the server */
7884 + ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,alloc_flags,0,&sent);
7888 + /* wait for the reply to completely arrive */
7890 + set_current_state(TASK_INTERRUPTIBLE);
7891 + if (call->app_call_state!=RXRPC_CSTATE_CLNT_RCV_REPLY ||
7892 + signal_pending(current))
7896 + set_current_state(TASK_RUNNING);
7899 + if (signal_pending(current))
7902 + switch (call->app_call_state) {
7903 + case RXRPC_CSTATE_ERROR:
7904 + ret = call->app_errno;
7907 + case RXRPC_CSTATE_CLNT_GOT_REPLY:
7916 + set_current_state(TASK_UNINTERRUPTIBLE);
7917 + rxrpc_call_abort(call,ret);
7921 + set_current_state(TASK_RUNNING);
7922 + remove_wait_queue(&call->waitq,&myself);
7923 + rxrpc_put_call(call);
7925 + rxrpc_put_connection(conn);
7929 +} /* end afs_rxvl_probe() */
7931 +/*****************************************************************************/
7933 + * look up a volume location database entry by name
7935 +int afs_rxvl_get_entry_by_name(afs_server_t *server, const char *volname,
7936 + struct afs_cache_volume *entry)
7938 + DECLARE_WAITQUEUE(myself,current);
7940 + struct rxrpc_connection *conn;
7941 + struct rxrpc_call *call;
7942 + struct iovec piov[3];
7946 + u32 *bp, param[2], zero;
7948 + _enter(",%s,",volname);
7950 + memset(entry,0,sizeof(*entry));
7952 + /* get hold of the vlserver connection */
7953 + ret = afs_server_get_vlconn(server,&conn);
7957 + /* create a call through that connection */
7958 + ret = rxrpc_create_call(conn,NULL,NULL,afs_rxvl_aemap,&call);
7960 + printk("kAFS: Unable to create call: %d\n",ret);
7961 + goto out_put_conn;
7963 + call->app_opcode = VLGETENTRYBYNAME;
7965 + /* we want to get event notifications from the call */
7966 + add_wait_queue(&call->waitq,&myself);
7968 + /* marshall the parameters */
7969 + piov[1].iov_len = strlen(volname);
7970 + piov[1].iov_base = (char*)volname;
7973 + piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
7974 + piov[2].iov_base = &zero;
7976 + param[0] = htonl(VLGETENTRYBYNAME);
7977 + param[1] = htonl(piov[1].iov_len);
7979 + piov[0].iov_len = sizeof(param);
7980 + piov[0].iov_base = param;
7982 + /* send the parameters to the server */
7983 + ret = rxrpc_call_write_data(call,3,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
7987 + /* wait for the reply to completely arrive */
7988 + bp = rxrpc_call_alloc_scratch(call,384);
7990 + ret = rxrpc_call_read_data(call,bp,384,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
7992 + if (ret==-ECONNABORTED) {
7993 + ret = call->app_errno;
7999 + /* unmarshall the reply */
8000 + for (loop=0; loop<64; loop++)
8001 + entry->name[loop] = ntohl(*bp++);
8002 + bp++; /* final NUL */
8005 + entry->nservers = ntohl(*bp++);
8007 + for (loop=0; loop<8; loop++)
8008 + entry->servers[loop].s_addr = *bp++;
8010 + bp += 8; /* partition IDs */
8012 + for (loop=0; loop<8; loop++) {
8013 + tmp = ntohl(*bp++);
8014 + if (tmp & AFS_VLSF_RWVOL ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RW;
8015 + if (tmp & AFS_VLSF_ROVOL ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RO;
8016 + if (tmp & AFS_VLSF_BACKVOL) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_BAK;
8019 + entry->vid[0] = ntohl(*bp++);
8020 + entry->vid[1] = ntohl(*bp++);
8021 + entry->vid[2] = ntohl(*bp++);
8023 + bp++; /* clone ID */
8025 + tmp = ntohl(*bp++); /* flags */
8026 + if (tmp & AFS_VLF_RWEXISTS ) entry->vidmask |= AFS_CACHE_VOL_STM_RW;
8027 + if (tmp & AFS_VLF_ROEXISTS ) entry->vidmask |= AFS_CACHE_VOL_STM_RO;
8028 + if (tmp & AFS_VLF_BACKEXISTS) entry->vidmask |= AFS_CACHE_VOL_STM_BAK;
8031 + if (!entry->vidmask)
8035 + entry->ctime = xtime.tv_sec;
8039 + set_current_state(TASK_RUNNING);
8040 + remove_wait_queue(&call->waitq,&myself);
8041 + rxrpc_put_call(call);
8043 + rxrpc_put_connection(conn);
8045 + _leave(" = %d",ret);
8049 + set_current_state(TASK_UNINTERRUPTIBLE);
8050 + rxrpc_call_abort(call,ret);
8053 +} /* end afs_rxvl_get_entry_by_name() */
8055 +/*****************************************************************************/
8057 + * look up a volume location database entry by ID
8059 +int afs_rxvl_get_entry_by_id(afs_server_t *server,
8060 + afs_volid_t volid,
8061 + afs_voltype_t voltype,
8062 + struct afs_cache_volume *entry)
8064 + DECLARE_WAITQUEUE(myself,current);
8066 + struct rxrpc_connection *conn;
8067 + struct rxrpc_call *call;
8068 + struct iovec piov[1];
8072 + u32 *bp, param[3];
8074 + _enter(",%x,%d,",volid,voltype);
8076 + memset(entry,0,sizeof(*entry));
8078 + /* get hold of the vlserver connection */
8079 + ret = afs_server_get_vlconn(server,&conn);
8083 + /* create a call through that connection */
8084 + ret = rxrpc_create_call(conn,NULL,NULL,afs_rxvl_aemap,&call);
8086 + printk("kAFS: Unable to create call: %d\n",ret);
8087 + goto out_put_conn;
8089 + call->app_opcode = VLGETENTRYBYID;
8091 + /* we want to get event notifications from the call */
8092 + add_wait_queue(&call->waitq,&myself);
8094 + /* marshall the parameters */
8095 + param[0] = htonl(VLGETENTRYBYID);
8096 + param[1] = htonl(volid);
8097 + param[2] = htonl(voltype);
8099 + piov[0].iov_len = sizeof(param);
8100 + piov[0].iov_base = param;
8102 + /* send the parameters to the server */
8103 + ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
8107 + /* wait for the reply to completely arrive */
8108 + bp = rxrpc_call_alloc_scratch(call,384);
8110 + ret = rxrpc_call_read_data(call,bp,384,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
8112 + if (ret==-ECONNABORTED) {
8113 + ret = call->app_errno;
8119 + /* unmarshall the reply */
8120 + for (loop=0; loop<64; loop++)
8121 + entry->name[loop] = ntohl(*bp++);
8122 + bp++; /* final NUL */
8125 + entry->nservers = ntohl(*bp++);
8127 + for (loop=0; loop<8; loop++)
8128 + entry->servers[loop].s_addr = *bp++;
8130 + bp += 8; /* partition IDs */
8132 + for (loop=0; loop<8; loop++) {
8133 + tmp = ntohl(*bp++);
8134 + if (tmp & AFS_VLSF_RWVOL ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RW;
8135 + if (tmp & AFS_VLSF_ROVOL ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RO;
8136 + if (tmp & AFS_VLSF_BACKVOL) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_BAK;
8139 + entry->vid[0] = ntohl(*bp++);
8140 + entry->vid[1] = ntohl(*bp++);
8141 + entry->vid[2] = ntohl(*bp++);
8143 + bp++; /* clone ID */
8145 + tmp = ntohl(*bp++); /* flags */
8146 + if (tmp & AFS_VLF_RWEXISTS ) entry->vidmask |= AFS_CACHE_VOL_STM_RW;
8147 + if (tmp & AFS_VLF_ROEXISTS ) entry->vidmask |= AFS_CACHE_VOL_STM_RO;
8148 + if (tmp & AFS_VLF_BACKEXISTS) entry->vidmask |= AFS_CACHE_VOL_STM_BAK;
8151 + if (!entry->vidmask)
8154 +#if 0 /* TODO: remove */
8155 + entry->nservers = 3;
8156 + entry->servers[0].s_addr = htonl(0xac101249);
8157 + entry->servers[1].s_addr = htonl(0xac101243);
8158 + entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
8160 + entry->srvtmask[0] = AFS_CACHE_VOL_STM_RO;
8161 + entry->srvtmask[1] = AFS_CACHE_VOL_STM_RO;
8162 + entry->srvtmask[2] = AFS_CACHE_VOL_STM_RO | AFS_CACHE_VOL_STM_RW;
8166 + entry->ctime = xtime.tv_sec;
8170 + set_current_state(TASK_RUNNING);
8171 + remove_wait_queue(&call->waitq,&myself);
8172 + rxrpc_put_call(call);
8174 + rxrpc_put_connection(conn);
8176 + _leave(" = %d",ret);
8180 + set_current_state(TASK_UNINTERRUPTIBLE);
8181 + rxrpc_call_abort(call,ret);
8184 +} /* end afs_rxvl_get_entry_by_id() */
8186 +/*****************************************************************************/
8188 + * look up a volume location database entry by ID asynchronously
8190 +int afs_rxvl_get_entry_by_id_async(afs_async_op_t *op,
8191 + afs_volid_t volid,
8192 + afs_voltype_t voltype)
8194 + struct rxrpc_connection *conn;
8195 + struct rxrpc_call *call;
8196 + struct iovec piov[1];
8201 + _enter(",%x,%d,",volid,voltype);
8203 + /* get hold of the vlserver connection */
8204 + ret = afs_server_get_vlconn(op->server,&conn);
8206 + _leave(" = %d",ret);
8210 + /* create a call through that connection */
8211 + ret = rxrpc_create_call(conn,
8212 + afs_rxvl_get_entry_by_id_attn,
8213 + afs_rxvl_get_entry_by_id_error,
8216 + rxrpc_put_connection(conn);
8219 + printk("kAFS: Unable to create call: %d\n",ret);
8220 + _leave(" = %d",ret);
8224 + op->call->app_opcode = VLGETENTRYBYID;
8225 + op->call->app_user = op;
8228 + rxrpc_get_call(call);
8230 + /* send event notifications from the call to kafsasyncd */
8231 + afs_kafsasyncd_begin_op(op);
8233 + /* marshall the parameters */
8234 + param[0] = htonl(VLGETENTRYBYID);
8235 + param[1] = htonl(volid);
8236 + param[2] = htonl(voltype);
8238 + piov[0].iov_len = sizeof(param);
8239 + piov[0].iov_base = param;
8241 + /* allocate result read buffer in scratch space */
8242 + call->app_scr_ptr = rxrpc_call_alloc_scratch(op->call,384);
8244 + /* send the parameters to the server */
8245 + ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
8247 + rxrpc_call_abort(call,ret); /* handle from kafsasyncd */
8252 + /* wait for the reply to completely arrive */
8253 + ret = rxrpc_call_read_data(call,call->app_scr_ptr,384,0);
8257 + case -ECONNABORTED:
8259 + break; /* all handled by kafsasyncd */
8262 + rxrpc_call_abort(call,ret); /* force kafsasyncd to handle it */
8268 + rxrpc_put_call(call);
8269 + _leave(" = %d",ret);
8272 +} /* end afs_rxvl_get_entry_by_id_async() */
8274 +/*****************************************************************************/
8276 + * attend to the asynchronous get VLDB entry by ID
8278 +int afs_rxvl_get_entry_by_id_async2(afs_async_op_t *op,
8279 + struct afs_cache_volume *entry)
8281 + unsigned *bp, tmp;
8284 + _enter("{op=%p cst=%u}",op,op->call->app_call_state);
8286 + memset(entry,0,sizeof(*entry));
8288 + if (op->call->app_call_state==RXRPC_CSTATE_COMPLETE) {
8289 + /* operation finished */
8290 + afs_kafsasyncd_terminate_op(op);
8292 + bp = op->call->app_scr_ptr;
8294 + /* unmarshall the reply */
8295 + for (loop=0; loop<64; loop++)
8296 + entry->name[loop] = ntohl(*bp++);
8297 + bp++; /* final NUL */
8300 + entry->nservers = ntohl(*bp++);
8302 + for (loop=0; loop<8; loop++)
8303 + entry->servers[loop].s_addr = *bp++;
8305 + bp += 8; /* partition IDs */
8307 + for (loop=0; loop<8; loop++) {
8308 + tmp = ntohl(*bp++);
8309 + if (tmp & AFS_VLSF_RWVOL ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RW;
8310 + if (tmp & AFS_VLSF_ROVOL ) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_RO;
8311 + if (tmp & AFS_VLSF_BACKVOL) entry->srvtmask[loop] |= AFS_CACHE_VOL_STM_BAK;
8314 + entry->vid[0] = ntohl(*bp++);
8315 + entry->vid[1] = ntohl(*bp++);
8316 + entry->vid[2] = ntohl(*bp++);
8318 + bp++; /* clone ID */
8320 + tmp = ntohl(*bp++); /* flags */
8321 + if (tmp & AFS_VLF_RWEXISTS ) entry->vidmask |= AFS_CACHE_VOL_STM_RW;
8322 + if (tmp & AFS_VLF_ROEXISTS ) entry->vidmask |= AFS_CACHE_VOL_STM_RO;
8323 + if (tmp & AFS_VLF_BACKEXISTS) entry->vidmask |= AFS_CACHE_VOL_STM_BAK;
8326 + if (!entry->vidmask) {
8327 + rxrpc_call_abort(op->call,ret);
8331 +#if 0 /* TODO: remove */
8332 + entry->nservers = 3;
8333 + entry->servers[0].s_addr = htonl(0xac101249);
8334 + entry->servers[1].s_addr = htonl(0xac101243);
8335 + entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
8337 + entry->srvtmask[0] = AFS_CACHE_VOL_STM_RO;
8338 + entry->srvtmask[1] = AFS_CACHE_VOL_STM_RO;
8339 + entry->srvtmask[2] = AFS_CACHE_VOL_STM_RO | AFS_CACHE_VOL_STM_RW;
8343 + entry->ctime = xtime.tv_sec;
8348 + if (op->call->app_call_state==RXRPC_CSTATE_ERROR) {
8349 + /* operation error */
8350 + ret = op->call->app_errno;
8354 + _leave(" = -EAGAIN");
8358 + rxrpc_put_call(op->call);
8360 + _leave(" = %d",ret);
8362 +} /* end afs_rxvl_get_entry_by_id_async2() */
8364 +/*****************************************************************************/
8366 + * handle attention events on an async get-entry-by-ID op
8367 + * - called from krxiod
8369 +static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call)
8371 + afs_async_op_t *op = call->app_user;
8373 + _enter("{op=%p cst=%u}",op,call->app_call_state);
8375 + switch (call->app_call_state) {
8376 + case RXRPC_CSTATE_COMPLETE:
8377 + afs_kafsasyncd_attend_op(op);
8379 + case RXRPC_CSTATE_CLNT_RCV_REPLY:
8380 + if (call->app_async_read)
8382 + case RXRPC_CSTATE_CLNT_GOT_REPLY:
8383 + if (call->app_read_count==0)
8385 + printk("kAFS: Reply bigger than expected {cst=%u asyn=%d mark=%d rdy=%u pr=%u%s}",
8386 + call->app_call_state,
8387 + call->app_async_read,
8389 + call->app_ready_qty,
8390 + call->pkt_rcv_count,
8391 + call->app_last_rcv ? " last" : "");
8393 + rxrpc_call_abort(call,-EBADMSG);
8401 +} /* end afs_rxvl_get_entry_by_id_attn() */
8403 +/*****************************************************************************/
8405 + * handle error events on an async get-entry-by-ID op
8406 + * - called from krxiod
8408 +static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call)
8410 + afs_async_op_t *op = call->app_user;
8412 + _enter("{op=%p cst=%u}",op,call->app_call_state);
8414 + afs_kafsasyncd_attend_op(op);
8418 +} /* end afs_rxvl_get_entry_by_id_error() */
8419 diff -urNp linux-5240/fs/afs/vlclient.h linux-5250/fs/afs/vlclient.h
8420 --- linux-5240/fs/afs/vlclient.h 1970-01-01 01:00:00.000000000 +0100
8421 +++ linux-5250/fs/afs/vlclient.h
8423 +/* vlclient.h: Volume Location Service client interface
8425 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
8426 + * Written by David Howells (dhowells@redhat.com)
8428 + * This program is free software; you can redistribute it and/or
8429 + * modify it under the terms of the GNU General Public License
8430 + * as published by the Free Software Foundation; either version
8431 + * 2 of the License, or (at your option) any later version.
8434 +#ifndef _LINUX_AFS_VLCLIENT_H
8435 +#define _LINUX_AFS_VLCLIENT_H
8438 +#include "cache-layout.h"
8440 +enum AFSVL_Errors {
8441 + AFSVL_IDEXIST = 363520, /* Volume Id entry exists in vl database */
8442 + AFSVL_IO = 363521, /* I/O related error */
8443 + AFSVL_NAMEEXIST = 363522, /* Volume name entry exists in vl database */
8444 + AFSVL_CREATEFAIL = 363523, /* Internal creation failure */
8445 + AFSVL_NOENT = 363524, /* No such entry */
8446 + AFSVL_EMPTY = 363525, /* Vl database is empty */
8447 + AFSVL_ENTDELETED = 363526, /* Entry is deleted (soft delete) */
8448 + AFSVL_BADNAME = 363527, /* Volume name is illegal */
8449 + AFSVL_BADINDEX = 363528, /* Index is out of range */
8450 + AFSVL_BADVOLTYPE = 363529, /* Bad volume type */
8451 + AFSVL_BADSERVER = 363530, /* Illegal server number (out of range) */
8452 + AFSVL_BADPARTITION = 363531, /* Bad partition number */
8453 + AFSVL_REPSFULL = 363532, /* Run out of space for Replication sites */
8454 + AFSVL_NOREPSERVER = 363533, /* No such Replication server site exists */
8455 + AFSVL_DUPREPSERVER = 363534, /* Replication site already exists */
8456 + AFSVL_RWNOTFOUND = 363535, /* Parent R/W entry not found */
8457 + AFSVL_BADREFCOUNT = 363536, /* Illegal Reference Count number */
8458 + AFSVL_SIZEEXCEEDED = 363537, /* Vl size for attributes exceeded */
8459 + AFSVL_BADENTRY = 363538, /* Bad incoming vl entry */
8460 + AFSVL_BADVOLIDBUMP = 363539, /* Illegal max volid increment */
8461 + AFSVL_IDALREADYHASHED = 363540, /* RO/BACK id already hashed */
8462 + AFSVL_ENTRYLOCKED = 363541, /* Vl entry is already locked */
8463 + AFSVL_BADVOLOPER = 363542, /* Bad volume operation code */
8464 + AFSVL_BADRELLOCKTYPE = 363543, /* Bad release lock type */
8465 + AFSVL_RERELEASE = 363544, /* Status report: last release was aborted */
8466 +	AFSVL_BADSERVERFLAG	= 363545,	/* Invalid replication site server flag */
8467 + AFSVL_PERM = 363546, /* No permission access */
8468 + AFSVL_NOMEM = 363547, /* malloc/realloc failed to alloc enough memory */
8471 +/* maps to "struct vldbentry" in vvl-spec.pdf */
8472 +struct afsvl_dbentry {
8473 + char name[65]; /* name of volume (including NUL char) */
8474 + afs_voltype_t type; /* volume type */
8475 + unsigned num_servers; /* num servers that hold instances of this vol */
8476 + unsigned clone_id; /* cloning ID */
8479 +#define AFS_VLF_RWEXISTS 0x1000 /* R/W volume exists */
8480 +#define AFS_VLF_ROEXISTS 0x2000 /* R/O volume exists */
8481 +#define AFS_VLF_BACKEXISTS 0x4000 /* backup volume exists */
8483 + afs_volid_t volume_ids[3]; /* volume IDs */
8486 + struct in_addr addr; /* server address */
8487 + unsigned partition; /* partition ID on this server */
8488 + unsigned flags; /* server specific flags */
8489 +#define AFS_VLSF_NEWREPSITE 0x0001 /* unused */
8490 +#define AFS_VLSF_ROVOL 0x0002 /* this server holds a R/O instance of the volume */
8491 +#define AFS_VLSF_RWVOL 0x0004 /* this server holds a R/W instance of the volume */
8492 +#define AFS_VLSF_BACKVOL 0x0008 /* this server holds a backup instance of the volume */
8497 +/* probe a volume location server to see if it is still alive */
8498 +extern int afs_rxvl_probe(afs_server_t *server, int alloc_flags);
8500 +/* look up a volume location database entry by name */
8501 +extern int afs_rxvl_get_entry_by_name(afs_server_t *server,
8502 + const char *volname,
8503 + struct afs_cache_volume *entry);
8505 +/* look up a volume location database entry by ID */
8506 +extern int afs_rxvl_get_entry_by_id(afs_server_t *server,
8507 + afs_volid_t volid,
8508 + afs_voltype_t voltype,
8509 + struct afs_cache_volume *entry);
8511 +extern int afs_rxvl_get_entry_by_id_async(afs_async_op_t *op,
8512 + afs_volid_t volid,
8513 + afs_voltype_t voltype);
8515 +extern int afs_rxvl_get_entry_by_id_async2(afs_async_op_t *op,
8516 + struct afs_cache_volume *entry);
8518 +#endif /* _LINUX_AFS_VLCLIENT_H */
8519 diff -urNp linux-5240/fs/afs/vlocation.c linux-5250/fs/afs/vlocation.c
8520 --- linux-5240/fs/afs/vlocation.c 1970-01-01 01:00:00.000000000 +0100
8521 +++ linux-5250/fs/afs/vlocation.c
8523 +/* vlocation.c: volume location management
8525 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
8526 + * Written by David Howells (dhowells@redhat.com)
8528 + * This program is free software; you can redistribute it and/or
8529 + * modify it under the terms of the GNU General Public License
8530 + * as published by the Free Software Foundation; either version
8531 + * 2 of the License, or (at your option) any later version.
8534 +#include <linux/kernel.h>
8535 +#include <linux/module.h>
8536 +#include <linux/init.h>
8537 +#include <linux/slab.h>
8538 +#include <linux/fs.h>
8539 +#include <linux/pagemap.h>
8540 +#include "volume.h"
8542 +#include "cmservice.h"
8543 +#include "fsclient.h"
8544 +#include "vlclient.h"
8546 +#include "kafstimod.h"
8547 +#include <rxrpc/connection.h>
8548 +#include "internal.h"
8550 +#define AFS_VLDB_TIMEOUT HZ*1000
8552 +static void afs_vlocation_update_timer(afs_timer_t *timer);
8553 +static void afs_vlocation_update_attend(afs_async_op_t *op);
8554 +static void afs_vlocation_update_discard(afs_async_op_t *op);
8556 +static void __afs_vlocation_timeout(afs_timer_t *timer)
8558 + afs_vlocation_t *vlocation = list_entry(timer,afs_vlocation_t,timeout);
8560 + _debug("VL TIMEOUT [%s{u=%d}]",vlocation->vldb.name,atomic_read(&vlocation->usage));
8562 + afs_vlocation_do_timeout(vlocation);
8565 +static const struct afs_timer_ops afs_vlocation_timer_ops = {
8566 + timed_out: __afs_vlocation_timeout,
8569 +static const struct afs_timer_ops afs_vlocation_update_timer_ops = {
8570 + timed_out: afs_vlocation_update_timer,
8573 +static const struct afs_async_op_ops afs_vlocation_update_op_ops = {
8574 + attend: afs_vlocation_update_attend,
8575 + discard: afs_vlocation_update_discard,
8578 +static LIST_HEAD(afs_vlocation_update_pendq); /* queue of VLs awaiting update */
8579 +static afs_vlocation_t *afs_vlocation_update; /* VL currently being updated */
8580 +static spinlock_t afs_vlocation_update_lock = SPIN_LOCK_UNLOCKED; /* lock guarding update queue */
8582 +/*****************************************************************************/
8584 + * iterate through the VL servers in a cell until one of them admits knowing about the volume in
8586 + * - caller must have cell->vl_sem write-locked
8588 +static int afs_vlocation_access_vl_by_name(afs_vlocation_t *vlocation,
8590 + struct afs_cache_volume *vldb)
8592 + afs_server_t *server = NULL;
8593 + afs_cell_t *cell = vlocation->cell;
8596 + _enter("%s,%s,",cell->name,name);
8599 + for (count=cell->vl_naddrs; count>0; count--) {
8600 + _debug("CellServ[%hu]: %08x",
8601 + cell->vl_curr_svix,cell->vl_addrs[cell->vl_curr_svix].s_addr);
8603 + /* try and create a server */
8604 + ret = afs_server_lookup(cell,&cell->vl_addrs[cell->vl_curr_svix],&server);
8615 + /* attempt to access the VL server */
8616 + ret = afs_rxvl_get_entry_by_name(server,name,vldb);
8619 + afs_put_server(server);
8620 + vlocation->vldb.cell_ix = cell->cache_ix;
8624 + case -ENETUNREACH:
8625 + case -EHOSTUNREACH:
8626 + case -ECONNREFUSED:
8627 + down_write(&server->sem);
8628 + if (server->vlserver) {
8629 + rxrpc_put_connection(server->vlserver);
8630 + server->vlserver = NULL;
8632 + up_write(&server->sem);
8633 + afs_put_server(server);
8634 + if (ret==-ENOMEM || ret==-ENONET)
8638 + afs_put_server(server);
8641 + afs_put_server(server);
8646 + /* rotate the server records upon lookup failure */
8648 + cell->vl_curr_svix++;
8649 + cell->vl_curr_svix %= cell->vl_naddrs;
8653 + _leave(" = %d",ret);
8656 +} /* end afs_vlocation_access_vl_by_name() */
8658 +/*****************************************************************************/
8660 + * iterate through the VL servers in a cell until one of them admits knowing about the volume in
8662 + * - caller must have cell->vl_sem write-locked
8664 +static int afs_vlocation_access_vl_by_id(afs_vlocation_t *vlocation,
8665 + afs_volid_t volid,
8666 + afs_voltype_t voltype,
8667 + struct afs_cache_volume *vldb)
8669 + afs_server_t *server = NULL;
8670 + afs_cell_t *cell = vlocation->cell;
8673 + _enter("%s,%x,%d,",cell->name,volid,voltype);
8676 + for (count=cell->vl_naddrs; count>0; count--) {
8677 + _debug("CellServ[%hu]: %08x",
8678 + cell->vl_curr_svix,cell->vl_addrs[cell->vl_curr_svix].s_addr);
8680 + /* try and create a server */
8681 + ret = afs_server_lookup(cell,&cell->vl_addrs[cell->vl_curr_svix],&server);
8692 + /* attempt to access the VL server */
8693 + ret = afs_rxvl_get_entry_by_id(server,volid,voltype,vldb);
8696 + afs_put_server(server);
8697 + vlocation->vldb.cell_ix = cell->cache_ix;
8701 + case -ENETUNREACH:
8702 + case -EHOSTUNREACH:
8703 + case -ECONNREFUSED:
8704 + down_write(&server->sem);
8705 + if (server->vlserver) {
8706 + rxrpc_put_connection(server->vlserver);
8707 + server->vlserver = NULL;
8709 + up_write(&server->sem);
8710 + afs_put_server(server);
8711 + if (ret==-ENOMEM || ret==-ENONET)
8715 + afs_put_server(server);
8718 + afs_put_server(server);
8723 + /* rotate the server records upon lookup failure */
8725 + cell->vl_curr_svix++;
8726 + cell->vl_curr_svix %= cell->vl_naddrs;
8730 + _leave(" = %d",ret);
8733 +} /* end afs_vlocation_access_vl_by_id() */
8735 +/*****************************************************************************/
8737 + * lookup volume location
8738 + * - caller must have cell->vol_sem write-locked
8739 + * - iterate through the VL servers in a cell until one of them admits knowing about the volume in
8741 + * - lookup in the local cache if not able to find on the VL server
8742 + * - insert/update in the local cache if did get a VL response
8744 +int afs_vlocation_lookup(afs_cache_t *cache, afs_cell_t *cell, const char *name,
8745 + afs_vlocation_t **_vlocation)
8747 + struct afs_cache_volume vldb;
8748 + struct list_head *_p;
8749 + afs_vlocation_t *vlocation;
8750 + afs_voltype_t voltype;
8752 + int active = 0, ret;
8754 + _enter(",%s,%s,",cell->name,name);
8756 + if (strlen(name)>sizeof(vlocation->vldb.name)) {
8757 + _leave(" = -ENAMETOOLONG");
8758 + return -ENAMETOOLONG;
8761 + /* search the cell's active list first */
8762 + list_for_each(_p,&cell->vl_list) {
8763 + vlocation = list_entry(_p,afs_vlocation_t,link);
8764 + if (strncmp(vlocation->vldb.name,name,sizeof(vlocation->vldb.name))==0)
8765 + goto found_in_memory;
8768 + /* search the cell's graveyard list second */
8769 + spin_lock(&cell->vl_gylock);
8770 + list_for_each(_p,&cell->vl_graveyard) {
8771 + vlocation = list_entry(_p,afs_vlocation_t,link);
8772 + if (strncmp(vlocation->vldb.name,name,sizeof(vlocation->vldb.name))==0)
8773 + goto found_in_graveyard;
8775 + spin_unlock(&cell->vl_gylock);
8777 + /* not in the cell's in-memory lists - create a new record */
8778 + vlocation = kmalloc(sizeof(afs_vlocation_t),GFP_KERNEL);
8782 + memset(vlocation,0,sizeof(afs_vlocation_t));
8783 + atomic_set(&vlocation->usage,1);
8784 + INIT_LIST_HEAD(&vlocation->link);
8785 + rwlock_init(&vlocation->lock);
8786 + strncpy(vlocation->vldb.name,name,sizeof(vlocation->vldb.name));
8788 + afs_timer_init(&vlocation->timeout,&afs_vlocation_timer_ops);
8789 + afs_timer_init(&vlocation->upd_timer,&afs_vlocation_update_timer_ops);
8790 + afs_async_op_init(&vlocation->upd_op,&afs_vlocation_update_op_ops);
8793 + afs_get_cache(cache);
8794 + vlocation->cache = cache;
8796 + afs_get_cell(cell);
8797 + vlocation->cell = cell;
8798 + vlocation->vldb.cell_ix = cell->cache_ix;
8800 + list_add_tail(&vlocation->link,&cell->vl_list);
8803 + /* search local cache if wasn't in memory */
8804 + ret = afs_cache_lookup_vlocation(vlocation);
8806 + default: goto error; /* disk error */
8807 + case 0: goto found_in_cache; /* pulled from local cache into memory */
8808 + case -ENOENT: break; /* not in local cache */
8812 + /* try to look up an unknown volume in the cell VL databases by name */
8813 + ret = afs_vlocation_access_vl_by_name(vlocation,name,&vldb);
8815 + printk("kAFS: failed to locate '%s' in cell '%s'\n",name,cell->name);
8819 + goto found_on_vlserver;
8821 + found_in_graveyard:
8822 + /* found in the graveyard - resurrect */
8823 + _debug("found in graveyard");
8824 + atomic_inc(&vlocation->usage);
8825 + list_del(&vlocation->link);
8826 + list_add_tail(&vlocation->link,&cell->vl_list);
8827 + spin_unlock(&cell->vl_gylock);
8829 + afs_kafstimod_del_timer(&vlocation->timeout);
8833 + /* found in memory - check to see if it's active */
8834 + _debug("found in memory");
8835 + atomic_inc(&vlocation->usage);
8840 +/* found_in_cache: */
8841 + /* try to look up a cached volume in the cell VL databases by ID */
8842 + _debug("found in cache");
8844 + _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
8845 + vlocation->vldb.name,
8846 + vlocation->vldb.vidmask,
8847 + ntohl(vlocation->vldb.servers[0].s_addr),vlocation->vldb.srvtmask[0],
8848 + ntohl(vlocation->vldb.servers[1].s_addr),vlocation->vldb.srvtmask[1],
8849 + ntohl(vlocation->vldb.servers[2].s_addr),vlocation->vldb.srvtmask[2]
8852 + _debug("Vids: %08x %08x %08x",
8853 + vlocation->vldb.vid[0],vlocation->vldb.vid[1],vlocation->vldb.vid[2]);
8855 + if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_RW) {
8856 + vid = vlocation->vldb.vid[0];
8857 + voltype = AFSVL_RWVOL;
8859 + else if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_RO) {
8860 + vid = vlocation->vldb.vid[1];
8861 + voltype = AFSVL_ROVOL;
8863 + else if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_BAK) {
8864 + vid = vlocation->vldb.vid[2];
8865 + voltype = AFSVL_BACKVOL;
8873 + ret = afs_vlocation_access_vl_by_id(vlocation,vid,voltype,&vldb);
8877 + printk("kAFS: failed to volume '%s' (%x) up in '%s': %d\n",
8878 + name,vid,cell->name,ret);
8881 + /* pulled from local cache into memory */
8883 + goto found_on_vlserver;
8885 + /* uh oh... looks like the volume got deleted */
8887 + printk("kAFS: volume '%s' (%x) does not exist '%s'\n",name,vid,cell->name);
8889 + /* TODO: make existing record unavailable */
8893 + found_on_vlserver:
8894 + _debug("Done VL Lookup: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
8897 + ntohl(vldb.servers[0].s_addr),vldb.srvtmask[0],
8898 + ntohl(vldb.servers[1].s_addr),vldb.srvtmask[1],
8899 + ntohl(vldb.servers[2].s_addr),vldb.srvtmask[2]
8902 + _debug("Vids: %08x %08x %08x",vldb.vid[0],vldb.vid[1],vldb.vid[2]);
8904 + if (strncmp(vldb.name,name,sizeof(vlocation->vldb.name))!=0)
8905 + printk("kAFS: name of volume '%s' changed to '%s' on server\n",name,vldb.name);
8907 + memcpy(&vlocation->vldb,&vldb,sizeof(vlocation->vldb));
8908 + vlocation->vldb.cell_ix = cell->cache_ix;
8911 + /* add volume entry to local cache */
8912 + ret = afs_cache_update_vlocation(vlocation);
8917 + afs_kafstimod_add_timer(&vlocation->upd_timer,10*HZ);
8919 + *_vlocation = vlocation;
8920 + _leave(" = 0 (%p)",vlocation);
8926 + __afs_put_vlocation(vlocation);
8929 + list_del(&vlocation->link);
8930 + afs_put_cell(vlocation->cell);
8932 + afs_put_cache(vlocation->cache);
8938 + _leave(" = %d",ret);
8940 +} /* end afs_vlocation_lookup() */
8942 +/*****************************************************************************/
8944 + * finish using a volume location record
8945 + * - caller must have cell->vol_sem write-locked
8947 +void __afs_put_vlocation(afs_vlocation_t *vlocation)
8949 + afs_cell_t *cell = vlocation->cell;
8951 + _enter("%s",vlocation->vldb.name);
8953 + /* sanity check */
8954 + if (atomic_read(&vlocation->usage)<=0)
8957 + spin_lock(&cell->vl_gylock);
8958 + if (likely(!atomic_dec_and_test(&vlocation->usage))) {
8959 + spin_unlock(&cell->vl_gylock);
8964 + /* move to graveyard queue */
8965 + list_del(&vlocation->link);
8966 + list_add_tail(&vlocation->link,&cell->vl_graveyard);
8968 + /* remove from pending timeout queue (refcounted if actually being updated) */
8969 + list_del_init(&vlocation->upd_op.link);
8971 + /* time out in 10 secs */
8972 + afs_kafstimod_del_timer(&vlocation->upd_timer);
8973 + afs_kafstimod_add_timer(&vlocation->timeout,10*HZ);
8975 + spin_unlock(&cell->vl_gylock);
8977 + _leave(" [killed]");
8978 +} /* end __afs_put_vlocation() */
8980 +/*****************************************************************************/
8982 + * finish using a volume location record
8984 +void afs_put_vlocation(afs_vlocation_t *vlocation)
8986 + afs_cell_t *cell = vlocation->cell;
8988 + down_write(&cell->vl_sem);
8989 + __afs_put_vlocation(vlocation);
8990 + up_write(&cell->vl_sem);
8991 +} /* end afs_put_vlocation() */
8993 +/*****************************************************************************/
8995 + * timeout vlocation record
8996 + * - removes from the cell's graveyard if the usage count is zero
8998 +void afs_vlocation_do_timeout(afs_vlocation_t *vlocation)
9002 + _enter("%s",vlocation->vldb.name);
9004 + cell = vlocation->cell;
9006 + if (atomic_read(&vlocation->usage)<0) BUG();
9008 + /* remove from graveyard if still dead */
9009 + spin_lock(&cell->vl_gylock);
9010 + if (atomic_read(&vlocation->usage)==0)
9011 + list_del_init(&vlocation->link);
9014 + spin_unlock(&cell->vl_gylock);
9018 + return; /* resurrected */
9021 + /* we can now destroy it properly */
9022 + afs_put_cell(cell);
9024 + afs_put_cache(vlocation->cache);
9029 + _leave(" [destroyed]");
9030 +} /* end afs_vlocation_do_timeout() */
9032 +/*****************************************************************************/
9034 + * send an update operation to the currently selected server
9036 +static int afs_vlocation_update_begin(afs_vlocation_t *vlocation)
9038 + afs_voltype_t voltype;
9042 + _enter("%s{ufs=%u ucs=%u}",
9043 + vlocation->vldb.name,vlocation->upd_first_svix,vlocation->upd_curr_svix);
9045 + /* try to look up a cached volume in the cell VL databases by ID */
9046 + if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_RW) {
9047 + vid = vlocation->vldb.vid[0];
9048 + voltype = AFSVL_RWVOL;
9050 + else if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_RO) {
9051 + vid = vlocation->vldb.vid[1];
9052 + voltype = AFSVL_ROVOL;
9054 + else if (vlocation->vldb.vidmask & AFS_CACHE_VOL_STM_BAK) {
9055 + vid = vlocation->vldb.vid[2];
9056 + voltype = AFSVL_BACKVOL;
9064 + /* contact the chosen server */
9065 + ret = afs_server_lookup(vlocation->cell,
9066 + &vlocation->cell->vl_addrs[vlocation->upd_curr_svix],
9067 + &vlocation->upd_op.server);
9074 + _leave(" = %d",ret);
9078 + /* initiate the update operation */
9079 + ret = afs_rxvl_get_entry_by_id_async(&vlocation->upd_op,vid,voltype);
9081 + _leave(" = %d",ret);
9085 + _leave(" = %d",ret);
9087 +} /* end afs_vlocation_update_begin() */
9089 +/*****************************************************************************/
9091 + * abandon updating a VL record
9092 + * - does not restart the update timer
9094 +static void afs_vlocation_update_abandon(afs_vlocation_t *vlocation,
9095 + afs_vlocation_upd_t state,
9098 + _enter("%s,%u",vlocation->vldb.name,state);
9101 + printk("kAFS: Abandoning VL update '%s': %d\n",vlocation->vldb.name,ret);
9103 + /* discard the server record */
9104 + if (vlocation->upd_op.server) {
9105 + afs_put_server(vlocation->upd_op.server);
9106 + vlocation->upd_op.server = NULL;
9109 + spin_lock(&afs_vlocation_update_lock);
9110 + afs_vlocation_update = NULL;
9111 + vlocation->upd_state = state;
9113 + /* TODO: start updating next VL record on pending list */
9115 + spin_unlock(&afs_vlocation_update_lock);
9118 +} /* end afs_vlocation_update_abandon() */
9120 +/*****************************************************************************/
9122 + * handle periodic update timeouts and busy retry timeouts
9123 + * - called from kafstimod
9125 +static void afs_vlocation_update_timer(afs_timer_t *timer)
9127 + afs_vlocation_t *vlocation = list_entry(timer,afs_vlocation_t,upd_timer);
9130 + _enter("%s",vlocation->vldb.name);
9132 + /* only update if not in the graveyard (defend against putting too) */
9133 + spin_lock(&vlocation->cell->vl_gylock);
9135 + if (!atomic_read(&vlocation->usage))
9138 + spin_lock(&afs_vlocation_update_lock);
9140 + /* if we were woken up due to EBUSY sleep then restart immediately if possible or else jump
9141 + * to front of pending queue */
9142 + if (vlocation->upd_state==AFS_VLUPD_BUSYSLEEP) {
9143 + if (afs_vlocation_update) {
9144 + list_add(&vlocation->upd_op.link,&afs_vlocation_update_pendq);
9147 + afs_get_vlocation(vlocation);
9148 + afs_vlocation_update = vlocation;
9149 + vlocation->upd_state = AFS_VLUPD_INPROGRESS;
9154 + /* put on pending queue if there's already another update in progress */
9155 + if (afs_vlocation_update) {
9156 + vlocation->upd_state = AFS_VLUPD_PENDING;
9157 + list_add_tail(&vlocation->upd_op.link,&afs_vlocation_update_pendq);
9161 + /* hold a ref on it while actually updating */
9162 + afs_get_vlocation(vlocation);
9163 + afs_vlocation_update = vlocation;
9164 + vlocation->upd_state = AFS_VLUPD_INPROGRESS;
9166 + spin_unlock(&afs_vlocation_update_lock);
9167 + spin_unlock(&vlocation->cell->vl_gylock);
9169 + /* okay... we can start the update */
9170 + _debug("BEGIN VL UPDATE [%s]",vlocation->vldb.name);
9171 + vlocation->upd_first_svix = vlocation->cell->vl_curr_svix;
9172 + vlocation->upd_curr_svix = vlocation->upd_first_svix;
9173 + vlocation->upd_rej_cnt = 0;
9174 + vlocation->upd_busy_cnt = 0;
9176 + ret = afs_vlocation_update_begin(vlocation);
9178 + afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,ret);
9179 + afs_kafstimod_add_timer(&vlocation->upd_timer,AFS_VLDB_TIMEOUT);
9180 + afs_put_vlocation(vlocation);
9187 + spin_unlock(&afs_vlocation_update_lock);
9189 + spin_unlock(&vlocation->cell->vl_gylock);
9193 +} /* end afs_vlocation_update_timer() */
9195 +/*****************************************************************************/
9197 + * attend to an update operation upon which an event happened
9198 + * - called in kafsasyncd context
9200 +static void afs_vlocation_update_attend(afs_async_op_t *op)
9202 + struct afs_cache_volume vldb;
9203 + afs_vlocation_t *vlocation = list_entry(op,afs_vlocation_t,upd_op);
9207 + _enter("%s",vlocation->vldb.name);
9209 + ret = afs_rxvl_get_entry_by_id_async2(op,&vldb);
9212 + _leave(" [unfinished]");
9216 + _debug("END VL UPDATE: %d\n",ret);
9217 + vlocation->valid = 1;
9219 + _debug("Done VL Lookup: %02x { %08x(%x) %08x(%x) %08x(%x) }",
9221 + ntohl(vldb.servers[0].s_addr),vldb.srvtmask[0],
9222 + ntohl(vldb.servers[1].s_addr),vldb.srvtmask[1],
9223 + ntohl(vldb.servers[2].s_addr),vldb.srvtmask[2]
9226 + _debug("Vids: %08x %08x %08x",vldb.vid[0],vldb.vid[1],vldb.vid[2]);
9228 + afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,0);
9230 + down_write(&vlocation->cell->vl_sem);
9232 + /* actually update the cache */
9233 + if (strncmp(vldb.name,vlocation->vldb.name,sizeof(vlocation->vldb.name))!=0)
9234 + printk("kAFS: name of volume '%s' changed to '%s' on server\n",
9235 + vlocation->vldb.name,vldb.name);
9237 + memcpy(&vlocation->vldb,&vldb,sizeof(vlocation->vldb));
9238 + vlocation->vldb.cell_ix = vlocation->cell->cache_ix;
9241 + /* add volume entry to local cache */
9242 + ret = afs_cache_update_vlocation(vlocation);
9245 + up_write(&vlocation->cell->vl_sem);
9248 + printk("kAFS: failed to update local cache: %d\n",ret);
9250 + afs_kafstimod_add_timer(&vlocation->upd_timer,AFS_VLDB_TIMEOUT);
9251 + afs_put_vlocation(vlocation);
9252 + _leave(" [found]");
9256 + vlocation->upd_rej_cnt++;
9259 + /* the server is locked - retry in a very short while */
9261 + vlocation->upd_busy_cnt++;
9262 + if (vlocation->upd_busy_cnt>3)
9263 + goto try_next; /* too many retries */
9265 + afs_vlocation_update_abandon(vlocation,AFS_VLUPD_BUSYSLEEP,0);
9266 + afs_kafstimod_add_timer(&vlocation->upd_timer,HZ/2);
9267 + afs_put_vlocation(vlocation);
9268 + _leave(" [busy]");
9271 + case -ENETUNREACH:
9272 + case -EHOSTUNREACH:
9273 + case -ECONNREFUSED:
9275 + /* record bad vlserver info in the cell too
9276 + * - TODO: use down_write_trylock() if available
9278 + if (vlocation->upd_curr_svix == vlocation->cell->vl_curr_svix)
9279 + vlocation->cell->vl_curr_svix =
9280 + vlocation->cell->vl_curr_svix % vlocation->cell->vl_naddrs;
9292 + /* try contacting the next server */
9294 + vlocation->upd_busy_cnt = 0;
9296 + if (vlocation->upd_op.server) {
9297 + /* discard the server record */
9298 + afs_put_server(vlocation->upd_op.server);
9299 + vlocation->upd_op.server = NULL;
9302 + tmp = vlocation->cell->vl_naddrs;
9306 + vlocation->upd_curr_svix++;
9307 + if (vlocation->upd_curr_svix >= tmp) vlocation->upd_curr_svix = 0;
9308 + if (vlocation->upd_first_svix >= tmp) vlocation->upd_first_svix = tmp - 1;
9310 + /* move to the next server */
9311 + if (vlocation->upd_curr_svix!=vlocation->upd_first_svix) {
9312 + afs_vlocation_update_begin(vlocation);
9313 + _leave(" [next]");
9317 + /* run out of servers to try - was the volume rejected? */
9318 + if (vlocation->upd_rej_cnt>0) {
9319 + printk("kAFS: Active volume no longer valid '%s'\n",vlocation->vldb.name);
9320 + vlocation->valid = 0;
9321 + afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,0);
9322 + afs_kafstimod_add_timer(&vlocation->upd_timer,AFS_VLDB_TIMEOUT);
9323 + afs_put_vlocation(vlocation);
9324 + _leave(" [invalidated]");
9328 + /* abandon the update */
9330 + afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,ret);
9331 + afs_kafstimod_add_timer(&vlocation->upd_timer,HZ*10);
9332 + afs_put_vlocation(vlocation);
9333 + _leave(" [abandoned]");
9335 +} /* end afs_vlocation_update_attend() */
9337 +/*****************************************************************************/
9339 + * deal with an update operation being discarded
9340 + * - called in kafsasyncd context when it's dying due to rmmod
9341 + * - the call has already been aborted and put()'d
9343 +static void afs_vlocation_update_discard(afs_async_op_t *op)
9345 + afs_vlocation_t *vlocation = list_entry(op,afs_vlocation_t,upd_op);
9347 + _enter("%s",vlocation->vldb.name);
9349 + afs_put_server(op->server);
9350 + op->server = NULL;
9352 + afs_put_vlocation(vlocation);
9355 +} /* end afs_vlocation_update_discard() */
9356 diff -urNp linux-5240/fs/afs/vnode.c linux-5250/fs/afs/vnode.c
9357 --- linux-5240/fs/afs/vnode.c 1970-01-01 01:00:00.000000000 +0100
9358 +++ linux-5250/fs/afs/vnode.c
9360 +/* vnode.c: AFS vnode management
9362 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
9363 + * Written by David Howells (dhowells@redhat.com)
9365 + * This program is free software; you can redistribute it and/or
9366 + * modify it under the terms of the GNU General Public License
9367 + * as published by the Free Software Foundation; either version
9368 + * 2 of the License, or (at your option) any later version.
9371 +#include <linux/kernel.h>
9372 +#include <linux/module.h>
9373 +#include <linux/init.h>
9374 +#include <linux/slab.h>
9375 +#include <linux/fs.h>
9376 +#include <linux/pagemap.h>
9377 +#include "volume.h"
9379 +#include "cmservice.h"
9380 +#include "fsclient.h"
9381 +#include "vlclient.h"
9384 +#include "internal.h"
9386 +static void afs_vnode_cb_timed_out(struct afs_timer *timer);
9388 +struct afs_timer_ops afs_vnode_cb_timed_out_ops = {
9389 + timed_out: afs_vnode_cb_timed_out,
9392 +/*****************************************************************************/
9394 + * handle a callback timing out
9395 + * TODO: retain a ref to vnode struct for an outstanding callback timeout
9397 +static void afs_vnode_cb_timed_out(struct afs_timer *timer)
9399 + afs_server_t *oldserver;
9400 + afs_vnode_t *vnode;
9402 + vnode = list_entry(timer,afs_vnode_t,cb_timeout);
9404 + _enter("%p",vnode);
9406 + /* set the changed flag in the vnode and release the server */
9407 + spin_lock(&vnode->lock);
9409 + oldserver = xchg(&vnode->cb_server,NULL);
9411 + vnode->flags |= AFS_VNODE_CHANGED;
9413 + spin_lock(&afs_cb_hash_lock);
9414 + list_del_init(&vnode->cb_hash_link);
9415 + spin_unlock(&afs_cb_hash_lock);
9417 + spin_lock(&oldserver->cb_lock);
9418 + list_del_init(&vnode->cb_link);
9419 + spin_unlock(&oldserver->cb_lock);
9422 + spin_unlock(&vnode->lock);
9425 + afs_put_server(oldserver);
9428 +} /* end afs_vnode_cb_timed_out() */
9430 +/*****************************************************************************/
9432 + * finish off updating the recorded status of a file
9433 + * - starts callback expiry timer
9434 + * - adds to server's callback list
9436 +void afs_vnode_finalise_status_update(afs_vnode_t *vnode, afs_server_t *server, int ret)
9438 + afs_server_t *oldserver = NULL;
9440 + _enter("%p,%p,%d",vnode,server,ret);
9442 + spin_lock(&vnode->lock);
9444 + vnode->flags &= ~AFS_VNODE_CHANGED;
9447 + /* adjust the callback timeout appropriately */
9448 + afs_kafstimod_add_timer(&vnode->cb_timeout,vnode->cb_expiry*HZ);
9450 + spin_lock(&afs_cb_hash_lock);
9451 + list_del(&vnode->cb_hash_link);
9452 + list_add_tail(&vnode->cb_hash_link,&afs_cb_hash(server,&vnode->fid));
9453 + spin_unlock(&afs_cb_hash_lock);
9455 + /* swap ref to old callback server with that for new callback server */
9456 + oldserver = xchg(&vnode->cb_server,server);
9457 + if (oldserver!=server) {
9459 + spin_lock(&oldserver->cb_lock);
9460 + list_del_init(&vnode->cb_link);
9461 + spin_unlock(&oldserver->cb_lock);
9464 + afs_get_server(server);
9465 + spin_lock(&server->cb_lock);
9466 + list_add_tail(&vnode->cb_link,&server->cb_promises);
9467 + spin_unlock(&server->cb_lock);
9474 + else if (ret==-ENOENT) {
9475 + /* the file was deleted - clear the callback timeout */
9476 + oldserver = xchg(&vnode->cb_server,NULL);
9477 + afs_kafstimod_del_timer(&vnode->cb_timeout);
9479 + _debug("got NOENT from server - marking file deleted");
9480 + vnode->flags |= AFS_VNODE_DELETED;
9483 + vnode->update_cnt--;
9485 + spin_unlock(&vnode->lock);
9487 + wake_up_all(&vnode->update_waitq);
9490 + afs_put_server(oldserver);
9494 +} /* end afs_vnode_finalise_status_update() */
9496 +/*****************************************************************************/
9498 + * fetch file status from the volume
9499 + * - don't issue a fetch if:
9500 + * - the changed bit is not set and there's a valid callback
9501 + * - there are any outstanding ops that will fetch the status
9502 + * - TODO implement local caching
9504 +int afs_vnode_fetch_status(afs_vnode_t *vnode)
9506 + afs_server_t *server;
9509 + DECLARE_WAITQUEUE(myself,current);
9511 + _enter("%s,{%u,%u,%u}",vnode->volume->vlocation->vldb.name,
9512 + vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique);
9514 + if (!(vnode->flags & AFS_VNODE_CHANGED) && vnode->cb_server) {
9515 + _leave(" [unchanged]");
9519 + if (vnode->flags & AFS_VNODE_DELETED) {
9520 + _leave(" [deleted]");
9524 + spin_lock(&vnode->lock);
9526 + if (!(vnode->flags & AFS_VNODE_CHANGED)) {
9527 + spin_unlock(&vnode->lock);
9528 + _leave(" [unchanged]");
9532 + if (vnode->update_cnt>0) {
9533 + /* someone else started a fetch */
9534 + set_current_state(TASK_UNINTERRUPTIBLE);
9535 + add_wait_queue(&vnode->update_waitq,&myself);
9537 + /* wait for the status to be updated */
9539 + if (!(vnode->flags & AFS_VNODE_CHANGED)) break;
9540 + if (vnode->flags & AFS_VNODE_DELETED) break;
9542 + /* it got updated and invalidated all before we saw it */
9543 + if (vnode->update_cnt==0) {
9544 + remove_wait_queue(&vnode->update_waitq,&myself);
9545 + set_current_state(TASK_RUNNING);
9549 + spin_unlock(&vnode->lock);
9552 + set_current_state(TASK_UNINTERRUPTIBLE);
9554 + spin_lock(&vnode->lock);
9557 + remove_wait_queue(&vnode->update_waitq,&myself);
9558 + spin_unlock(&vnode->lock);
9559 + set_current_state(TASK_RUNNING);
9561 + return vnode->flags & AFS_VNODE_DELETED ? -ENOENT : 0;
9565 + /* okay... we're going to have to initiate the op */
9566 + vnode->update_cnt++;
9568 + spin_unlock(&vnode->lock);
9570 + /* merge AFS status fetches and clear outstanding callback on this vnode */
9572 + /* pick a server to query */
9573 + ret = afs_volume_pick_fileserver(vnode->volume,&server);
9577 + _debug("USING SERVER: %08x\n",ntohl(server->addr.s_addr));
9579 + ret = afs_rxfs_fetch_file_status(server,vnode,NULL);
9581 + } while (!afs_volume_release_fileserver(vnode->volume,server,ret));
9583 + /* adjust the flags */
9584 + afs_vnode_finalise_status_update(vnode,server,ret);
9586 + _leave(" = %d",ret);
9588 +} /* end afs_vnode_fetch_status() */
9590 +/*****************************************************************************/
9592 + * fetch file data from the volume
9593 + * - TODO implement caching and server failover
9595 +int afs_vnode_fetch_data(afs_vnode_t *vnode, struct afs_rxfs_fetch_descriptor *desc)
9597 + afs_server_t *server;
9600 + _enter("%s,{%u,%u,%u}",
9601 + vnode->volume->vlocation->vldb.name,
9604 + vnode->fid.unique);
9606 + /* this op will fetch the status */
9607 + spin_lock(&vnode->lock);
9608 + vnode->update_cnt++;
9609 + spin_unlock(&vnode->lock);
9611 + /* merge in AFS status fetches and clear outstanding callback on this vnode */
9613 + /* pick a server to query */
9614 + ret = afs_volume_pick_fileserver(vnode->volume,&server);
9618 + _debug("USING SERVER: %08x\n",ntohl(server->addr.s_addr));
9620 + ret = afs_rxfs_fetch_file_data(server,vnode,desc,NULL);
9622 + } while (!afs_volume_release_fileserver(vnode->volume,server,ret));
9624 + /* adjust the flags */
9625 + afs_vnode_finalise_status_update(vnode,server,ret);
9627 + _leave(" = %d",ret);
9630 +} /* end afs_vnode_fetch_data() */
9632 +/*****************************************************************************/
9634 + * break any outstanding callback on a vnode
9635 + * - only relevent to server that issued it
9637 +int afs_vnode_give_up_callback(afs_vnode_t *vnode)
9639 + afs_server_t *server;
9642 + _enter("%s,{%u,%u,%u}",
9643 + vnode->volume->vlocation->vldb.name,
9646 + vnode->fid.unique);
9648 + spin_lock(&afs_cb_hash_lock);
9649 + list_del_init(&vnode->cb_hash_link);
9650 + spin_unlock(&afs_cb_hash_lock);
9652 + /* set the changed flag in the vnode and release the server */
9653 + spin_lock(&vnode->lock);
9655 + afs_kafstimod_del_timer(&vnode->cb_timeout);
9657 + server = xchg(&vnode->cb_server,NULL);
9659 + vnode->flags |= AFS_VNODE_CHANGED;
9661 + spin_lock(&server->cb_lock);
9662 + list_del_init(&vnode->cb_link);
9663 + spin_unlock(&server->cb_lock);
9666 + spin_unlock(&vnode->lock);
9670 + ret = afs_rxfs_give_up_callback(server,vnode);
9671 + afs_put_server(server);
9674 + _leave(" = %d",ret);
9676 +} /* end afs_vnode_give_up_callback() */
9677 diff -urNp linux-5240/fs/afs/vnode.h linux-5250/fs/afs/vnode.h
9678 --- linux-5240/fs/afs/vnode.h 1970-01-01 01:00:00.000000000 +0100
9679 +++ linux-5250/fs/afs/vnode.h
9681 +/* vnode.h: AFS vnode record
9683 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
9684 + * Written by David Howells (dhowells@redhat.com)
9686 + * This program is free software; you can redistribute it and/or
9687 + * modify it under the terms of the GNU General Public License
9688 + * as published by the Free Software Foundation; either version
9689 + * 2 of the License, or (at your option) any later version.
9692 +#ifndef _LINUX_AFS_VNODE_H
9693 +#define _LINUX_AFS_VNODE_H
9695 +#include <linux/fs.h>
9696 +#include <linux/version.h>
9697 +#include "server.h"
9698 +#include "kafstimod.h"
9702 +struct afs_rxfs_fetch_descriptor;
9704 +/*****************************************************************************/
9706 + * AFS inode private data
9710 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
9711 + struct inode vfs_inode; /* the VFS's inode record */
9713 + struct inode *inode; /* the VFS's inode */
9716 + afs_volume_t *volume; /* volume on which vnode resides */
9717 + afs_fid_t fid; /* the file identifier for this inode */
9718 + afs_file_status_t status; /* AFS status info for this file */
9719 + unsigned nix; /* vnode index in cache */
9721 + wait_queue_head_t update_waitq; /* status fetch waitqueue */
9722 + unsigned update_cnt; /* number of outstanding ops that will update the
9724 + spinlock_t lock; /* waitqueue/flags lock */
9726 +#define AFS_VNODE_CHANGED 0x00000001 /* set if vnode reported changed by callback */
9727 +#define AFS_VNODE_DELETED 0x00000002 /* set if vnode deleted on server */
9728 +#define AFS_VNODE_MOUNTPOINT 0x00000004 /* set if vnode is a mountpoint symlink */
9730 + /* outstanding callback notification on this file */
9731 + afs_server_t *cb_server; /* server that made the current promise */
9732 + struct list_head cb_link; /* link in server's promises list */
9733 + struct list_head cb_hash_link; /* link in master callback hash */
9734 + afs_timer_t cb_timeout; /* timeout on promise */
9735 + unsigned cb_version; /* callback version */
9736 + unsigned cb_expiry; /* callback expiry time */
9737 + afs_callback_type_t cb_type; /* type of callback */
9740 +static inline afs_vnode_t *AFS_FS_I(struct inode *inode)
9742 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
9743 + return list_entry(inode,afs_vnode_t,vfs_inode);
9745 + return inode->u.generic_ip;
9749 +static inline struct inode *AFS_VNODE_TO_I(afs_vnode_t *vnode)
9751 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
9752 + return &vnode->vfs_inode;
9754 + return vnode->inode;
9758 +extern int afs_vnode_fetch_status(afs_vnode_t *vnode);
9760 +extern int afs_vnode_fetch_data(afs_vnode_t *vnode, struct afs_rxfs_fetch_descriptor *desc);
9762 +extern int afs_vnode_give_up_callback(afs_vnode_t *vnode);
9764 +extern struct afs_timer_ops afs_vnode_cb_timed_out_ops;
9766 +#endif /* __KERNEL__ */
9768 +#endif /* _LINUX_AFS_VNODE_H */
9769 diff -urNp linux-5240/fs/afs/volume.c linux-5250/fs/afs/volume.c
9770 --- linux-5240/fs/afs/volume.c 1970-01-01 01:00:00.000000000 +0100
9771 +++ linux-5250/fs/afs/volume.c
9773 +/* volume.c: AFS volume management
9775 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
9776 + * Written by David Howells (dhowells@redhat.com)
9778 + * This program is free software; you can redistribute it and/or
9779 + * modify it under the terms of the GNU General Public License
9780 + * as published by the Free Software Foundation; either version
9781 + * 2 of the License, or (at your option) any later version.
9784 +#include <linux/kernel.h>
9785 +#include <linux/module.h>
9786 +#include <linux/init.h>
9787 +#include <linux/slab.h>
9788 +#include <linux/fs.h>
9789 +#include <linux/pagemap.h>
9790 +#include "volume.h"
9792 +#include "cmservice.h"
9793 +#include "fsclient.h"
9794 +#include "vlclient.h"
9796 +#include "internal.h"
9798 +const char *afs_voltypes[] = { "R/W", "R/O", "BAK" };
9800 +/*****************************************************************************/
9802 + * lookup a volume by name
9803 + * - this can be one of the following:
9804 + * "%[cell:]volume[.]" R/W volume
9805 + * "#[cell:]volume[.]" R/O or R/W volume (rwparent=0), or R/W (rwparent=1) volume
9806 + * "%[cell:]volume.readonly" R/O volume
9807 + * "#[cell:]volume.readonly" R/O volume
9808 + * "%[cell:]volume.backup" Backup volume
9809 + * "#[cell:]volume.backup" Backup volume
9811 + * The cell name is optional, and defaults to the current cell.
9813 + * See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin Guide
9814 + * - Rule 1: Explicit type suffix forces access of that type or nothing
9815 + * (no suffix, then use Rule 2 & 3)
9816 + * - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W if not available
9817 + * - Rule 3: If parent volume is R/W, then only mount R/W volume unless explicitly told otherwise
9819 +int afs_volume_lookup(afs_cache_t *cache, char *name, int rwparent, afs_volume_t **_volume)
9821 + afs_vlocation_t *vlocation = NULL;
9822 + afs_voltype_t type;
9823 + afs_volume_t *volume = NULL;
9824 + afs_cell_t *cell = NULL;
9825 + char *cellname, *volname, *suffix;
9827 + int force, ret, loop;
9829 + _enter(",%s,",name);
9831 + if (!name || (name[0]!='%' && name[0]!='#') || !name[1]) {
9832 + printk("kAFS: unparsable volume name\n");
9836 + /* determine the type of volume we're looking for */
9838 + type = AFSVL_ROVOL;
9840 + if (rwparent || name[0]=='%') {
9841 + type = AFSVL_RWVOL;
9845 + suffix = strrchr(name,'.');
9847 + if (strcmp(suffix,".readonly")==0) {
9848 + type = AFSVL_ROVOL;
9851 + else if (strcmp(suffix,".backup")==0) {
9852 + type = AFSVL_BACKVOL;
9855 + else if (suffix[1]==0) {
9864 + /* split the cell and volume names */
9866 + volname = strchr(name,':');
9876 + _debug("CELL:%s VOLUME:%s SUFFIX:%s TYPE:%d%s",
9877 + cellname,volname,suffix?:"-",type,force?" FORCE":"");
9879 + /* lookup the cell record */
9880 + ret = afs_cell_lookup(cache,cellname,&cell);
9882 + printk("kAFS: unable to lookup cell '%s'\n",cellname?:"");
9884 + if (cellname) volname[-1] = ':';
9888 + /* lookup the volume location record */
9889 + if (suffix) *suffix = 0;
9890 + ret = afs_vlocation_lookup(cache,cell,volname,&vlocation);
9891 + if (suffix) *suffix = '.';
9895 + /* make the final decision on the type we want */
9897 + if (force && !(vlocation->vldb.vidmask & (1<<type)))
9901 + for (loop=0; loop<vlocation->vldb.nservers; loop++)
9902 + srvtmask |= vlocation->vldb.srvtmask[loop];
9905 + if (!(srvtmask & (1 <<type)))
9908 + else if (srvtmask & AFS_CACHE_VOL_STM_RO) {
9909 + type = AFSVL_ROVOL;
9911 + else if (srvtmask & AFS_CACHE_VOL_STM_RW) {
9912 + type = AFSVL_RWVOL;
9918 + down_write(&cell->vl_sem);
9920 + /* is the volume already active? */
9921 + if (vlocation->vols[type]) {
9922 + /* yes - re-use it */
9923 + volume = vlocation->vols[type];
9924 + afs_get_volume(volume);
9928 + /* create a new volume record */
9929 + _debug("creating new volume record");
9932 + volume = kmalloc(sizeof(afs_volume_t),GFP_KERNEL);
9936 + memset(volume,0,sizeof(afs_volume_t));
9937 + atomic_set(&volume->usage,1);
9938 + volume->type = type;
9939 + volume->type_force = force;
9940 + volume->cell = cell;
9941 + volume->cix = cell->cache_ix;
9942 + volume->vid = vlocation->vldb.vid[type];
9944 + volume->vix.index = (vlocation->vix.index << 2) | type;
9946 + init_rwsem(&volume->server_sem);
9948 + /* look up all the applicable server records */
9949 + for (loop=0; loop<8; loop++) {
9950 + if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) {
9951 + ret = afs_server_lookup(volume->cell,
9952 + &vlocation->vldb.servers[loop],
9953 + &volume->servers[volume->nservers]);
9955 + goto error_discard;
9957 + volume->nservers++;
9961 + /* attach the cache and volume location */
9963 + afs_get_cache(cache); volume->cache = cache;
9965 + afs_get_vlocation(vlocation); volume->vlocation = vlocation;
9967 + vlocation->vols[type] = volume;
9970 + _debug("kAFS selected %s volume %08x",afs_voltypes[volume->type],volume->vid);
9971 + *_volume = volume;
9976 + up_write(&cell->vl_sem);
9978 + if (vlocation) afs_put_vlocation(vlocation);
9979 + if (cell) afs_put_cell(cell);
9981 + _leave(" = %d (%p)",ret,volume);
9985 + up_write(&cell->vl_sem);
9987 + for (loop=volume->nservers-1; loop>=0; loop--)
9988 + if (volume->servers[loop])
9989 + afs_put_server(volume->servers[loop]);
9993 +} /* end afs_volume_lookup() */
9995 +/*****************************************************************************/
9997 + * destroy a volume record
9999 +void afs_put_volume(afs_volume_t *volume)
10001 + afs_vlocation_t *vlocation;
10004 + _enter("%p",volume);
10006 + vlocation = volume->vlocation;
10008 + /* sanity check */
10009 + if (atomic_read(&volume->usage)<=0)
10012 + /* to prevent a race, the decrement and the dequeue must be effectively atomic */
10013 + down_write(&vlocation->cell->vl_sem);
10015 + if (likely(!atomic_dec_and_test(&volume->usage))) {
10016 + up_write(&vlocation->cell->vl_sem);
10021 + vlocation->vols[volume->type] = NULL;
10023 + up_write(&vlocation->cell->vl_sem);
10025 + afs_put_vlocation(vlocation);
10027 + /* finish cleaning up the volume */
10029 + if (volume->cache) afs_put_cache(volume->cache);
10032 + for (loop=volume->nservers-1; loop>=0; loop--)
10033 + if (volume->servers[loop])
10034 + afs_put_server(volume->servers[loop]);
10038 + _leave(" [destroyed]");
10039 +} /* end afs_put_volume() */
10041 +/*****************************************************************************/
10043 + * pick a server to use to try accessing this volume
10044 + * - returns with an elevated usage count on the server chosen
10046 +int afs_volume_pick_fileserver(afs_volume_t *volume, afs_server_t **_server)
10048 + afs_server_t *server;
10049 + int ret, state, loop;
10051 + _enter("%s",volume->vlocation->vldb.name);
10053 + down_read(&volume->server_sem);
10055 + /* handle the no-server case */
10056 + if (volume->nservers==0) {
10057 + ret = volume->rjservers ? -ENOMEDIUM : -ESTALE;
10058 + up_read(&volume->server_sem);
10059 + _leave(" = %d [no servers]",ret);
10063 + /* basically, just search the list for the first live server and use that */
10065 + for (loop=0; loop<volume->nservers; loop++) {
10066 + server = volume->servers[loop];
10067 + state = server->fs_state;
10070 + /* found an apparently healthy server */
10072 + afs_get_server(server);
10073 + up_read(&volume->server_sem);
10074 + *_server = server;
10075 + _leave(" = 0 (picked %08x)",ntohl(server->addr.s_addr));
10078 + case -ENETUNREACH:
10083 + case -EHOSTUNREACH:
10084 + if (ret==0 || ret==-ENETUNREACH)
10088 + case -ECONNREFUSED:
10089 + if (ret==0 || ret==-ENETUNREACH || ret==-EHOSTUNREACH)
10096 + ret==-ENETUNREACH ||
10097 + ret==-EHOSTUNREACH ||
10098 + ret==-ECONNREFUSED)
10104 + /* no available servers
10105 + * - TODO: handle the no active servers case better
10107 + up_read(&volume->server_sem);
10108 + _leave(" = %d",ret);
10110 +} /* end afs_volume_pick_fileserver() */
10112 +/*****************************************************************************/
10114 + * release a server after use
10115 + * - releases the ref on the server struct that was acquired by picking
10116 + * - records result of using a particular server to access a volume
10117 + * - return 0 to try again, 1 if okay or to issue error
10119 +int afs_volume_release_fileserver(afs_volume_t *volume, afs_server_t *server, int result)
10123 + _enter("%s,%08x,%d",volume->vlocation->vldb.name,ntohl(server->addr.s_addr),result);
10125 + switch (result) {
10128 + server->fs_act_jif = jiffies;
10131 + /* the fileserver denied all knowledge of the volume */
10133 + server->fs_act_jif = jiffies;
10134 + down_write(&volume->server_sem);
10136 + /* first, find where the server is in the active list (if it is) */
10137 + for (loop=0; loop<volume->nservers; loop++)
10138 + if (volume->servers[loop]==server)
10141 + /* no longer there - may have been discarded by another op */
10142 + goto try_next_server_upw;
10145 + volume->nservers--;
10146 + memmove(&volume->servers[loop],
10147 + &volume->servers[loop+1],
10148 + sizeof(volume->servers[loop]) * (volume->nservers - loop)
10150 + volume->servers[volume->nservers] = NULL;
10151 + afs_put_server(server);
10152 + volume->rjservers++;
10154 + if (volume->nservers>0)
10155 + /* another server might acknowledge its existence */
10156 + goto try_next_server_upw;
10158 + /* handle the case where all the fileservers have rejected the volume
10159 + * - TODO: try asking the fileservers for volume information
10160 + * - TODO: contact the VL server again to see if the volume is no longer registered
10162 + up_write(&volume->server_sem);
10163 + afs_put_server(server);
10164 + _leave(" [completely rejected]");
10167 + /* problem reaching the server */
10168 + case -ENETUNREACH:
10169 + case -EHOSTUNREACH:
10170 + case -ECONNREFUSED:
10173 + /* mark the server as dead
10174 + * TODO: vary dead timeout depending on error
10176 + spin_lock(&server->fs_lock);
10177 + if (!server->fs_state) {
10178 + server->fs_dead_jif = jiffies + HZ * 10;
10179 + server->fs_state = result;
10180 + printk("kAFS: SERVER DEAD state=%d\n",result);
10182 + spin_unlock(&server->fs_lock);
10183 + goto try_next_server;
10185 + /* miscellaneous error */
10187 + server->fs_act_jif = jiffies;
10193 + /* tell the caller to accept the result */
10194 + afs_put_server(server);
10198 + /* tell the caller to loop around and try the next server */
10199 + try_next_server_upw:
10200 + up_write(&volume->server_sem);
10202 + afs_put_server(server);
10203 + _leave(" [try next server]");
10206 +} /* end afs_volume_release_fileserver() */
10207 diff -urNp linux-5240/fs/afs/volume.h linux-5250/fs/afs/volume.h
10208 --- linux-5240/fs/afs/volume.h 1970-01-01 01:00:00.000000000 +0100
10209 +++ linux-5250/fs/afs/volume.h
10211 +/* volume.h: AFS volume management
10213 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10214 + * Written by David Howells (dhowells@redhat.com)
10216 + * This program is free software; you can redistribute it and/or
10217 + * modify it under the terms of the GNU General Public License
10218 + * as published by the Free Software Foundation; either version
10219 + * 2 of the License, or (at your option) any later version.
10222 +#ifndef _LINUX_AFS_VOLUME_H
10223 +#define _LINUX_AFS_VOLUME_H
10225 +#include "types.h"
10226 +#include "cache-layout.h"
10227 +#include "fsclient.h"
10228 +#include "kafstimod.h"
10229 +#include "kafsasyncd.h"
10231 +#define __packed __attribute__((packed))
10234 + AFS_VLUPD_SLEEP, /* sleeping waiting for update timer to fire */
10235 + AFS_VLUPD_PENDING, /* on pending queue */
10236 + AFS_VLUPD_INPROGRESS, /* op in progress */
10237 + AFS_VLUPD_BUSYSLEEP, /* sleeping because server returned EBUSY */
10239 +} __attribute__((packed)) afs_vlocation_upd_t;
10241 +/*****************************************************************************/
10243 + * AFS volume location record
10245 +struct afs_vlocation
10248 + struct list_head link; /* link in cell volume location list */
10249 + afs_timer_t timeout; /* decaching timer */
10250 + afs_cell_t *cell; /* cell to which volume belongs */
10252 + afs_cache_t *cache; /* backing cache */
10254 + afs_cache_volix_t vix; /* volume index in this cache */
10255 + struct afs_cache_volume vldb; /* volume information DB record */
10256 + struct afs_volume *vols[3]; /* volume access record pointer (index by type) */
10257 + rwlock_t lock; /* access lock */
10258 + unsigned long read_jif; /* time at which last read from vlserver */
10259 + afs_timer_t upd_timer; /* update timer */
10260 + afs_async_op_t upd_op; /* update operation */
10261 + afs_vlocation_upd_t upd_state; /* update state */
10262 + unsigned short upd_first_svix; /* first server index during update */
10263 + unsigned short upd_curr_svix; /* current server index during update */
10264 + unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */
10265 + unsigned short upd_busy_cnt; /* EBUSY count during update */
10266 + unsigned short valid; /* T if valid */
10269 +extern int afs_vlocation_lookup(afs_cache_t *cache, afs_cell_t *cell, const char *name,
10270 + afs_vlocation_t **_vlocation);
10272 +#define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0)
10274 +extern void __afs_put_vlocation(afs_vlocation_t *vlocation);
10275 +extern void afs_put_vlocation(afs_vlocation_t *vlocation);
10276 +extern void afs_vlocation_do_timeout(afs_vlocation_t *vlocation);
10278 +/*****************************************************************************/
10280 + * AFS volume access record
10285 + afs_cell_t *cell; /* cell to which belongs (unrefd ptr) */
10286 + afs_vlocation_t *vlocation; /* volume location */
10287 + afs_volid_t vid; /* volume ID */
10288 + afs_voltype_t __packed type; /* type of volume */
10289 + char type_force; /* force volume type (suppress R/O -> R/W) */
10291 + afs_cache_t *cache; /* backing cache */
10293 + afs_cache_cellix_t cix; /* cell index in this cache */
10294 + afs_cache_volix_t vix; /* volume index in this cache */
10296 + unsigned short nservers; /* number of server slots filled */
10297 + unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */
10298 + afs_server_t *servers[8]; /* servers on which volume resides (ordered) */
10299 + struct rw_semaphore server_sem; /* lock for accessing current server */
10302 +extern int afs_volume_lookup(afs_cache_t *cache, char *name, int ro, afs_volume_t **_volume);
10304 +#define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0)
10306 +extern void afs_put_volume(afs_volume_t *volume);
10308 +extern int afs_volume_pick_fileserver(afs_volume_t *volume, afs_server_t **_server);
10310 +extern int afs_volume_release_fileserver(afs_volume_t *volume, afs_server_t *server, int result);
10312 +#endif /* _LINUX_AFS_VOLUME_H */
10313 diff -urNp linux-5240/fs/Config.in linux-5250/fs/Config.in
10314 --- linux-5240/fs/Config.in
10315 +++ linux-5250/fs/Config.in
10316 @@ -23,6 +23,7 @@ dep_mbool ' Enable reiserfs debug mode'
10317 dep_mbool ' Stats in /proc/fs/reiserfs' CONFIG_REISERFS_PROC_INFO $CONFIG_REISERFS_FS
10319 dep_tristate 'ADFS file system support (EXPERIMENTAL)' CONFIG_ADFS_FS $CONFIG_EXPERIMENTAL
10320 +dep_tristate 'AFS distributed file system support' CONFIG_AFS_FS $CONFIG_EXPERIMENTAL
10321 dep_mbool ' ADFS write support (DANGEROUS)' CONFIG_ADFS_FS_RW $CONFIG_ADFS_FS $CONFIG_EXPERIMENTAL
10323 dep_tristate 'Amiga FFS file system support (EXPERIMENTAL)' CONFIG_AFFS_FS $CONFIG_EXPERIMENTAL
10324 diff -urNp linux-5240/fs/Makefile linux-5250/fs/Makefile
10325 --- linux-5240/fs/Makefile
10326 +++ linux-5250/fs/Makefile
10327 @@ -67,6 +67,7 @@ subdir-$(CONFIG_UDF_FS) += udf
10328 subdir-$(CONFIG_AUTOFS_FS) += autofs
10329 subdir-$(CONFIG_AUTOFS4_FS) += autofs4
10330 subdir-$(CONFIG_ADFS_FS) += adfs
10331 +subdir-$(CONFIG_AFS_FS) += afs
10332 subdir-$(CONFIG_REISERFS_FS) += reiserfs
10333 subdir-$(CONFIG_DEVPTS_FS) += devpts
10334 subdir-$(CONFIG_SUN_OPENPROMFS) += openpromfs
10335 diff -urNp linux-5240/include/rxrpc/call.h linux-5250/include/rxrpc/call.h
10336 --- linux-5240/include/rxrpc/call.h 1970-01-01 01:00:00.000000000 +0100
10337 +++ linux-5250/include/rxrpc/call.h
10339 +/* call.h: Rx call record
10341 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10342 + * Written by David Howells (dhowells@redhat.com)
10344 + * This program is free software; you can redistribute it and/or
10345 + * modify it under the terms of the GNU General Public License
10346 + * as published by the Free Software Foundation; either version
10347 + * 2 of the License, or (at your option) any later version.
10350 +#ifndef _LINUX_RXRPC_CALL_H
10351 +#define _LINUX_RXRPC_CALL_H
10353 +#include <rxrpc/types.h>
10354 +#include <rxrpc/rxrpc.h>
10355 +#include <rxrpc/packet.h>
10356 +#include <linux/timer.h>
10358 +#define RXRPC_CALL_ACK_WINDOW_SIZE 16
10360 +extern unsigned rxrpc_call_rcv_timeout; /* receive activity timeout (secs) */
10361 +extern unsigned rxrpc_call_acks_timeout; /* pending ACK (retransmit) timeout (secs) */
10362 +extern unsigned rxrpc_call_dfr_ack_timeout; /* deferred ACK timeout (secs) */
10363 +extern unsigned short rxrpc_call_max_resend; /* maximum consecutive resend count */
10365 +/* application call state
10366 + * - only state 0 and ffff are reserved, the state is set to 1 after an opid is received
10368 +enum rxrpc_app_cstate {
10369 + RXRPC_CSTATE_COMPLETE = 0, /* operation complete */
10370 + RXRPC_CSTATE_ERROR, /* operation ICMP error or aborted */
10371 + RXRPC_CSTATE_SRVR_RCV_OPID, /* [SERVER] receiving operation ID */
10372 + RXRPC_CSTATE_SRVR_RCV_ARGS, /* [SERVER] receiving operation data */
10373 + RXRPC_CSTATE_SRVR_GOT_ARGS, /* [SERVER] completely received operation data */
10374 + RXRPC_CSTATE_SRVR_SND_REPLY, /* [SERVER] sending operation reply */
10375 + RXRPC_CSTATE_SRVR_RCV_FINAL_ACK, /* [SERVER] receiving final ACK */
10376 + RXRPC_CSTATE_CLNT_SND_ARGS, /* [CLIENT] sending operation args */
10377 + RXRPC_CSTATE_CLNT_RCV_REPLY, /* [CLIENT] receiving operation reply */
10378 + RXRPC_CSTATE_CLNT_GOT_REPLY, /* [CLIENT] completely received operation reply */
10379 +} __attribute__((packed));
10381 +extern const char *rxrpc_call_states[];
10383 +enum rxrpc_app_estate {
10384 + RXRPC_ESTATE_NO_ERROR = 0, /* no error */
10385 + RXRPC_ESTATE_LOCAL_ABORT, /* aborted locally by application layer */
10386 + RXRPC_ESTATE_PEER_ABORT, /* aborted remotely by peer */
10387 + RXRPC_ESTATE_LOCAL_ERROR, /* local ICMP network error */
10388 + RXRPC_ESTATE_REMOTE_ERROR, /* remote ICMP network error */
10389 +} __attribute__((packed));
10391 +extern const char *rxrpc_call_error_states[];
10393 +/*****************************************************************************/
10395 + * Rx call record and application scratch buffer
10396 + * - the call record occupies the bottom of a complete page
10397 + * - the application scratch buffer occupies the rest
10402 + struct rxrpc_connection *conn; /* connection upon which active */
10403 + spinlock_t lock; /* access lock */
10404 + struct module *owner; /* owner module */
10405 + wait_queue_head_t waitq; /* wait queue for events to happen */
10406 + struct list_head link; /* general internal list link */
10407 + struct list_head call_link; /* master call list link */
10408 + u32 chan_ix; /* connection channel index (net order) */
10409 + u32 call_id; /* call ID on connection (net order) */
10410 + unsigned long cjif; /* jiffies at call creation */
10411 + unsigned long flags; /* control flags */
10412 +#define RXRPC_CALL_ACKS_TIMO 0x00000001 /* ACKS timeout reached */
10413 +#define RXRPC_CALL_ACKR_TIMO 0x00000002 /* ACKR timeout reached */
10414 +#define RXRPC_CALL_RCV_TIMO 0x00000004 /* RCV timeout reached */
10415 +#define RXRPC_CALL_RCV_PKT 0x00000008 /* received packet */
10417 + /* transmission */
10418 + rxrpc_seq_t snd_seq_count; /* outgoing packet sequence number counter */
10419 + struct rxrpc_message *snd_nextmsg; /* next message being constructed for sending */
10420 + struct rxrpc_message *snd_ping; /* last ping message sent */
10421 + unsigned short snd_resend_cnt; /* count of resends since last ACK */
10423 + /* transmission ACK tracking */
10424 + struct list_head acks_pendq; /* messages pending ACK (ordered by seq) */
10425 + unsigned acks_pend_cnt; /* number of un-ACK'd packets */
10426 + rxrpc_seq_t acks_dftv_seq; /* highest definitively ACK'd msg seq */
10427 + struct timer_list acks_timeout; /* timeout on expected ACK */
10430 + struct list_head rcv_receiveq; /* messages pending reception (ordered by seq) */
10431 + struct list_head rcv_krxiodq_lk; /* krxiod queue for new inbound packets */
10432 + struct timer_list rcv_timeout; /* call receive activity timeout */
10434 + /* reception ACK'ing */
10435 + rxrpc_seq_t ackr_win_bot; /* bottom of ACK window */
10436 + rxrpc_seq_t ackr_win_top; /* top of ACK window */
10437 + rxrpc_seq_t ackr_high_seq; /* highest seqno yet received */
10438 + rxrpc_seq_t ackr_prev_seq; /* previous seqno received */
10439 + unsigned ackr_pend_cnt; /* number of pending ACKs */
10440 + struct timer_list ackr_dfr_timo; /* timeout on deferred ACK */
10441 + char ackr_dfr_perm; /* request for deferred ACKs permitted */
10442 + rxrpc_seq_t ackr_dfr_seq; /* seqno for deferred ACK */
10443 + struct rxrpc_ackpacket ackr; /* pending normal ACK packet */
10444 + u8 ackr_array[RXRPC_CALL_ACK_WINDOW_SIZE]; /* ACK records */
10446 + /* presentation layer */
10447 + char app_last_rcv; /* T if received last packet from remote end */
10448 + enum rxrpc_app_cstate app_call_state; /* call state */
10449 + enum rxrpc_app_estate app_err_state; /* abort/error state */
10450 + struct list_head app_readyq; /* ordered ready received packet queue */
10451 + struct list_head app_unreadyq; /* ordered post-hole recv'd packet queue */
10452 + rxrpc_seq_t app_ready_seq; /* last seq number dropped into readyq */
10453 + size_t app_ready_qty; /* amount of data ready in readyq */
10454 + unsigned app_opcode; /* operation ID */
10455 + unsigned app_abort_code; /* abort code (when aborted) */
10456 + int app_errno; /* error number (when ICMP error received) */
10459 + unsigned pkt_rcv_count; /* count of received packets on this call */
10460 + unsigned pkt_snd_count; /* count of sent packets on this call */
10461 + unsigned app_read_count; /* number of reads issued */
10463 + /* bits for the application to use */
10464 + rxrpc_call_attn_func_t app_attn_func; /* callback when attention required */
10465 + rxrpc_call_error_func_t app_error_func; /* callback when abort sent (cleanup and put) */
10466 + rxrpc_call_aemap_func_t app_aemap_func; /* callback to map abort code to/from errno */
10467 + void *app_user; /* application data */
10468 + struct list_head app_link; /* application list linkage */
10469 + struct list_head app_attn_link; /* application attention list linkage */
10470 + size_t app_mark; /* trigger callback when app_ready_qty>=app_mark */
10471 + char app_async_read; /* T if in async-read mode */
10472 + u8 *app_read_buf; /* application async read buffer (app_mark size) */
10473 + u8 *app_scr_alloc; /* application scratch allocation pointer */
10474 + void *app_scr_ptr; /* application pointer into scratch buffer */
10476 +#define RXRPC_APP_MARK_EOF 0xFFFFFFFFU /* mark at end of input */
10478 + /* application scratch buffer */
10479 + u8 app_scratch[0] __attribute__((aligned(sizeof(long))));
10482 +#define RXRPC_CALL_SCRATCH_SIZE (PAGE_SIZE - sizeof(struct rxrpc_call))
10484 +#define rxrpc_call_reset_scratch(CALL) \
10485 +do { (CALL)->app_scr_alloc = (CALL)->app_scratch; } while(0)
10487 +#define rxrpc_call_alloc_scratch(CALL,SIZE) \
10490 + ptr = (CALL)->app_scr_alloc; \
10491 + (CALL)->app_scr_alloc += (SIZE); \
10492 + if ((SIZE)>RXRPC_CALL_SCRATCH_SIZE || \
10493 + (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) { \
10494 + printk("rxrpc_call_alloc_scratch(%p,%u)\n",(CALL),(SIZE)); \
10500 +#define rxrpc_call_alloc_scratch_s(CALL,TYPE) \
10502 + size_t size = sizeof(TYPE); \
10504 + ptr = (TYPE*)(CALL)->app_scr_alloc; \
10505 + (CALL)->app_scr_alloc += size; \
10506 + if (size>RXRPC_CALL_SCRATCH_SIZE || \
10507 + (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) { \
10508 + printk("rxrpc_call_alloc_scratch(%p,%u)\n",(CALL),size); \
10514 +#define rxrpc_call_is_ack_pending(CALL) ((CALL)->ackr.reason != 0)
10516 +extern int rxrpc_create_call(struct rxrpc_connection *conn,
10517 + rxrpc_call_attn_func_t attn,
10518 + rxrpc_call_error_func_t error,
10519 + rxrpc_call_aemap_func_t aemap,
10520 + struct rxrpc_call **_call);
10522 +extern int rxrpc_incoming_call(struct rxrpc_connection *conn,
10523 + struct rxrpc_message *msg,
10524 + struct rxrpc_call **_call);
10526 +static inline void rxrpc_get_call(struct rxrpc_call *call)
10528 + if (atomic_read(&call->usage)<=0)
10530 + atomic_inc(&call->usage);
10531 + /*printk("rxrpc_get_call(%p{u=%d})\n",(C),atomic_read(&(C)->usage));*/
10534 +extern void rxrpc_put_call(struct rxrpc_call *call);
10536 +extern void rxrpc_call_do_stuff(struct rxrpc_call *call);
10538 +extern int rxrpc_call_abort(struct rxrpc_call *call, int error);
10540 +#define RXRPC_CALL_READ_BLOCK 0x0001 /* block if not enough data and not yet EOF */
10541 +#define RXRPC_CALL_READ_ALL 0x0002 /* error if insufficient data received */
10542 +extern int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int flags);
10544 +extern int rxrpc_call_write_data(struct rxrpc_call *call,
10546 + struct iovec siov[],
10550 + size_t *size_sent);
10552 +extern int rxrpc_call_flush(struct rxrpc_call *call);
10554 +extern void rxrpc_call_handle_error(struct rxrpc_call *conn, int local, int errno);
10556 +#endif /* _LINUX_RXRPC_CALL_H */
10557 diff -urNp linux-5240/include/rxrpc/connection.h linux-5250/include/rxrpc/connection.h
10558 --- linux-5240/include/rxrpc/connection.h 1970-01-01 01:00:00.000000000 +0100
10559 +++ linux-5250/include/rxrpc/connection.h
10561 +/* connection.h: Rx connection record
10563 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10564 + * Written by David Howells (dhowells@redhat.com)
10566 + * This program is free software; you can redistribute it and/or
10567 + * modify it under the terms of the GNU General Public License
10568 + * as published by the Free Software Foundation; either version
10569 + * 2 of the License, or (at your option) any later version.
10572 +#ifndef _LINUX_RXRPC_CONNECTION_H
10573 +#define _LINUX_RXRPC_CONNECTION_H
10575 +#include <rxrpc/types.h>
10576 +#include <rxrpc/krxtimod.h>
10580 +/*****************************************************************************/
10583 + * - connections are matched by (rmt_port,rmt_addr,service_id,conn_id,clientflag)
10584 + * - connections only retain a refcount on the peer when they are active
10585 + * - connections with refcount==0 are inactive and reside in the peer's graveyard
10587 +struct rxrpc_connection
10590 + struct rxrpc_transport *trans; /* transport endpoint */
10591 + struct rxrpc_peer *peer; /* peer from/to which connected */
10592 + struct rxrpc_service *service; /* responsible service (inbound conns) */
10593 + struct rxrpc_timer timeout; /* decaching timer */
10594 + struct list_head link; /* link in peer's list */
10595 + struct list_head proc_link; /* link in proc list */
10596 + struct list_head err_link; /* link in ICMP error processing list */
10597 + struct sockaddr_in addr; /* remote address */
10598 + struct rxrpc_call *channels[4]; /* channels (active calls) */
10599 + wait_queue_head_t chanwait; /* wait for channel to become available */
10600 + spinlock_t lock; /* access lock */
10601 + struct timeval atime; /* last access time */
10602 + size_t mtu_size; /* MTU size for outbound messages */
10603 + unsigned call_counter; /* call ID counter */
10604 + rxrpc_serial_t serial_counter; /* packet serial number counter */
10606 + /* the following should all be in net order */
10607 + u32 in_epoch; /* peer's epoch */
10608 + u32 out_epoch; /* my epoch */
10609 + u32 conn_id; /* connection ID, appropriately shifted */
10610 + u16 service_id; /* service ID */
10611 + u8 security_ix; /* security ID */
10612 + u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */
10613 + u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
10616 +extern int rxrpc_create_connection(struct rxrpc_transport *trans,
10619 + unsigned short service_id,
10621 + struct rxrpc_connection **_conn);
10623 +extern int rxrpc_connection_lookup(struct rxrpc_peer *peer,
10624 + struct rxrpc_message *msg,
10625 + struct rxrpc_connection **_conn);
10627 +static inline void rxrpc_get_connection(struct rxrpc_connection *conn)
10629 + if (atomic_read(&conn->usage)<0)
10631 + atomic_inc(&conn->usage);
10632 + //printk("rxrpc_get_conn(%p{u=%d})\n",conn,atomic_read(&conn->usage));
10635 +extern void rxrpc_put_connection(struct rxrpc_connection *conn);
10637 +extern int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
10638 + struct rxrpc_call *call,
10639 + struct rxrpc_message *msg);
10641 +extern void rxrpc_conn_handle_error(struct rxrpc_connection *conn, int local, int errno);
10643 +#endif /* _LINUX_RXRPC_CONNECTION_H */
10644 diff -urNp linux-5240/include/rxrpc/krxiod.h linux-5250/include/rxrpc/krxiod.h
10645 --- linux-5240/include/rxrpc/krxiod.h 1970-01-01 01:00:00.000000000 +0100
10646 +++ linux-5250/include/rxrpc/krxiod.h
10648 +/* krxiod.h: Rx RPC I/O kernel thread interface
10650 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10651 + * Written by David Howells (dhowells@redhat.com)
10653 + * This program is free software; you can redistribute it and/or
10654 + * modify it under the terms of the GNU General Public License
10655 + * as published by the Free Software Foundation; either version
10656 + * 2 of the License, or (at your option) any later version.
10659 +#ifndef _LINUX_RXRPC_KRXIOD_H
10660 +#define _LINUX_RXRPC_KRXIOD_H
10662 +#include <rxrpc/types.h>
10664 +extern int rxrpc_krxiod_init(void);
10665 +extern void rxrpc_krxiod_kill(void);
10666 +extern void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans);
10667 +extern void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans);
10668 +extern void rxrpc_krxiod_queue_peer(struct rxrpc_peer *peer);
10669 +extern void rxrpc_krxiod_dequeue_peer(struct rxrpc_peer *peer);
10670 +extern void rxrpc_krxiod_clear_peers(struct rxrpc_transport *trans);
10671 +extern void rxrpc_krxiod_queue_call(struct rxrpc_call *call);
10672 +extern void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call);
10674 +#endif /* _LINUX_RXRPC_KRXIOD_H */
10675 diff -urNp linux-5240/include/rxrpc/krxsecd.h linux-5250/include/rxrpc/krxsecd.h
10676 --- linux-5240/include/rxrpc/krxsecd.h 1970-01-01 01:00:00.000000000 +0100
10677 +++ linux-5250/include/rxrpc/krxsecd.h
10679 +/* krxsecd.h: Rx RPC security kernel thread interface
10681 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10682 + * Written by David Howells (dhowells@redhat.com)
10684 + * This program is free software; you can redistribute it and/or
10685 + * modify it under the terms of the GNU General Public License
10686 + * as published by the Free Software Foundation; either version
10687 + * 2 of the License, or (at your option) any later version.
10690 +#ifndef _LINUX_RXRPC_KRXSECD_H
10691 +#define _LINUX_RXRPC_KRXSECD_H
10693 +#include <rxrpc/types.h>
10695 +extern int rxrpc_krxsecd_init(void);
10696 +extern void rxrpc_krxsecd_kill(void);
10697 +extern void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans);
10698 +extern void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg);
10700 +#endif /* _LINUX_RXRPC_KRXSECD_H */
10701 diff -urNp linux-5240/include/rxrpc/krxtimod.h linux-5250/include/rxrpc/krxtimod.h
10702 --- linux-5240/include/rxrpc/krxtimod.h 1970-01-01 01:00:00.000000000 +0100
10703 +++ linux-5250/include/rxrpc/krxtimod.h
10705 +/* krxtimod.h: RxRPC timeout daemon
10707 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10708 + * Written by David Howells (dhowells@redhat.com)
10710 + * This program is free software; you can redistribute it and/or
10711 + * modify it under the terms of the GNU General Public License
10712 + * as published by the Free Software Foundation; either version
10713 + * 2 of the License, or (at your option) any later version.
10716 +#ifndef _LINUX_RXRPC_KRXTIMOD_H
10717 +#define _LINUX_RXRPC_KRXTIMOD_H
10719 +#include <rxrpc/types.h>
10721 +struct rxrpc_timer_ops {
10722 + /* called when the front of the timer queue has timed out */
10723 + void (*timed_out)(struct rxrpc_timer *timer);
10726 +/*****************************************************************************/
10728 + * RXRPC timer/timeout record
10730 +struct rxrpc_timer
10732 + struct list_head link; /* link in timer queue */
10733 + unsigned long timo_jif; /* timeout time */
10734 + const struct rxrpc_timer_ops *ops; /* timeout expiry function */
10737 +static inline void rxrpc_timer_init(rxrpc_timer_t *timer, const struct rxrpc_timer_ops *ops)
10739 + INIT_LIST_HEAD(&timer->link);
10740 + timer->ops = ops;
10743 +extern int rxrpc_krxtimod_start(void);
10744 +extern void rxrpc_krxtimod_kill(void);
10746 +extern void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout);
10747 +extern int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer);
10749 +#endif /* _LINUX_RXRPC_KRXTIMOD_H */
10750 diff -urNp linux-5240/include/rxrpc/message.h linux-5250/include/rxrpc/message.h
10751 --- linux-5240/include/rxrpc/message.h 1970-01-01 01:00:00.000000000 +0100
10752 +++ linux-5250/include/rxrpc/message.h
10754 +/* message.h: Rx message caching
10756 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10757 + * Written by David Howells (dhowells@redhat.com)
10759 + * This program is free software; you can redistribute it and/or
10760 + * modify it under the terms of the GNU General Public License
10761 + * as published by the Free Software Foundation; either version
10762 + * 2 of the License, or (at your option) any later version.
10765 +#ifndef _H_3AD3363A_3A9C_11D6_83D8_0002B3163499
10766 +#define _H_3AD3363A_3A9C_11D6_83D8_0002B3163499
10768 +#include <rxrpc/packet.h>
10770 +/*****************************************************************************/
10772 + * Rx message record
10774 +struct rxrpc_message
10777 + struct list_head link; /* list link */
10778 + struct timeval stamp; /* time received or last sent */
10779 + rxrpc_seq_t seq; /* message sequence number */
10781 + int state; /* the state the message is currently in */
10782 +#define RXRPC_MSG_PREPARED 0
10783 +#define RXRPC_MSG_SENT 1
10784 +#define RXRPC_MSG_ACKED 2 /* provisionally ACK'd */
10785 +#define RXRPC_MSG_DONE 3 /* definitively ACK'd (msg->seq<ack.firstPacket) */
10786 +#define RXRPC_MSG_RECEIVED 4
10787 +#define RXRPC_MSG_ERROR -1
10788 + char rttdone; /* used for RTT */
10790 + struct rxrpc_transport *trans; /* transport received through */
10791 + struct rxrpc_connection *conn; /* connection received over */
10792 + struct sk_buff *pkt; /* received packet */
10793 + off_t offset; /* offset into pkt of next byte of data */
10795 + struct rxrpc_header hdr; /* message header */
10797 + int dcount; /* data part count */
10798 + size_t dsize; /* data size */
10799 +#define RXRPC_MSG_MAX_IOCS 8
10800 + struct iovec data[RXRPC_MSG_MAX_IOCS]; /* message data */
10801 + unsigned long dfree; /* bit mask indicating kfree(data[x]) if T */
10804 +#define rxrpc_get_message(M) do { atomic_inc(&(M)->usage); } while(0)
10806 +extern void __rxrpc_put_message(struct rxrpc_message *msg);
10807 +static inline void rxrpc_put_message(struct rxrpc_message *msg)
10809 + if (atomic_read(&msg->usage)<=0)
10811 + if (atomic_dec_and_test(&msg->usage))
10812 + __rxrpc_put_message(msg);
10815 +extern int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
10816 + struct rxrpc_call *call,
10819 + struct iovec diov[],
10821 + struct rxrpc_message **_msg);
10823 +extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
10825 +#endif /* _H_3AD3363A_3A9C_11D6_83D8_0002B3163499 */
10826 diff -urNp linux-5240/include/rxrpc/packet.h linux-5250/include/rxrpc/packet.h
10827 --- linux-5240/include/rxrpc/packet.h 1970-01-01 01:00:00.000000000 +0100
10828 +++ linux-5250/include/rxrpc/packet.h
10830 +/* packet.h: Rx packet layout and definitions
10832 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10833 + * Written by David Howells (dhowells@redhat.com)
10835 + * This program is free software; you can redistribute it and/or
10836 + * modify it under the terms of the GNU General Public License
10837 + * as published by the Free Software Foundation; either version
10838 + * 2 of the License, or (at your option) any later version.
10841 +#ifndef _LINUX_RXRPC_PACKET_H
10842 +#define _LINUX_RXRPC_PACKET_H
10844 +#include <rxrpc/types.h>
10846 +#define RXRPC_IPUDP_SIZE 28
10847 +extern size_t RXRPC_MAX_PACKET_SIZE;
10848 +#define RXRPC_MAX_PACKET_DATA_SIZE (RXRPC_MAX_PACKET_SIZE - sizeof(struct rxrpc_header))
10849 +#define RXRPC_LOCAL_PACKET_SIZE RXRPC_MAX_PACKET_SIZE
10850 +#define RXRPC_REMOTE_PACKET_SIZE (576 - RXRPC_IPUDP_SIZE)
10852 +/*****************************************************************************/
10854 + * on-the-wire Rx packet header
10855 + * - all multibyte fields should be in network byte order
10857 +struct rxrpc_header
10859 + u32 epoch; /* client boot timestamp */
10861 + u32 cid; /* connection and channel ID */
10862 +#define RXRPC_MAXCALLS 4 /* max active calls per conn */
10863 +#define RXRPC_CHANNELMASK (RXRPC_MAXCALLS-1) /* mask for channel ID */
10864 +#define RXRPC_CIDMASK (~RXRPC_CHANNELMASK) /* mask for connection ID */
10865 +#define RXRPC_CIDSHIFT 2 /* shift for connection ID */
10867 + u32 callNumber; /* call ID (0 for connection-level packets) */
10868 +#define RXRPC_PROCESS_MAXCALLS (1<<2) /* maximum number of active calls per conn (power of 2) */
10870 + u32 seq; /* sequence number of pkt in call stream */
10871 + u32 serial; /* serial number of pkt sent to network */
10873 + u8 type; /* packet type */
10874 +#define RXRPC_PACKET_TYPE_DATA 1 /* data */
10875 +#define RXRPC_PACKET_TYPE_ACK 2 /* ACK */
10876 +#define RXRPC_PACKET_TYPE_BUSY 3 /* call reject */
10877 +#define RXRPC_PACKET_TYPE_ABORT 4 /* call/connection abort */
10878 +#define RXRPC_PACKET_TYPE_ACKALL 5 /* ACK all outstanding packets on call */
10879 +#define RXRPC_PACKET_TYPE_CHALLENGE 6 /* connection security challenge (SRVR->CLNT) */
10880 +#define RXRPC_PACKET_TYPE_RESPONSE 7 /* connection security response (CLNT->SRVR) */
10881 +#define RXRPC_PACKET_TYPE_DEBUG 8 /* debug info request */
10882 +#define RXRPC_N_PACKET_TYPES 9 /* number of packet types (incl type 0) */
10884 + u8 flags; /* packet flags */
10885 +#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */
10886 +#define RXRPC_REQUEST_ACK 0x02 /* request an unconditional ACK of this packet */
10887 +#define RXRPC_LAST_PACKET 0x04 /* the last packet from this side for this call */
10888 +#define RXRPC_MORE_PACKETS 0x08 /* more packets to come */
10889 +#define RXRPC_JUMBO_PACKET 0x20 /* [DATA] this is a jumbo packet */
10890 +#define RXRPC_SLOW_START_OK 0x20 /* [ACK] slow start supported */
10892 + u8 userStatus; /* app-layer defined status */
10893 + u8 securityIndex; /* security protocol ID */
10894 + u16 _rsvd; /* reserved (used by kerberos security as cksum) */
10895 + u16 serviceId; /* service ID */
10897 +} __attribute__((packed));
10899 +#define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X)
10901 +extern const char *rxrpc_pkts[];
10903 +/*****************************************************************************/
10905 + * jumbo packet secondary header
10906 + * - can be mapped to read header by:
10907 + * - new_serial = serial + 1
10908 + * - new_seq = seq + 1
10909 + * - new_flags = j_flags
10910 + * - new__rsvd = j__rsvd
10911 + * - duplicating all other fields
10913 +struct rxrpc_jumbo_header
10915 + u8 flags; /* packet flags (as per rxrpc_header) */
10917 + u16 _rsvd; /* reserved (used by kerberos security as cksum) */
10920 +#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
10922 +/*****************************************************************************/
10924 + * on-the-wire Rx ACK packet data payload
10925 + * - all multibyte fields should be in network byte order
10927 +struct rxrpc_ackpacket
10929 + u16 bufferSpace; /* number of packet buffers available */
10930 + u16 maxSkew; /* diff between serno being ACK'd and highest serial no received */
10931 + u32 firstPacket; /* sequence no of first ACK'd packet in attached list */
10932 + u32 previousPacket; /* sequence no of previous packet received */
10933 + u32 serial; /* serial no of packet that prompted this ACK */
10935 + u8 reason; /* reason for ACK */
10936 +#define RXRPC_ACK_REQUESTED 1 /* ACK was requested on packet */
10937 +#define RXRPC_ACK_DUPLICATE 2 /* duplicate packet received */
10938 +#define RXRPC_ACK_OUT_OF_SEQUENCE 3 /* out of sequence packet received */
10939 +#define RXRPC_ACK_EXCEEDS_WINDOW 4 /* packet received beyond end of ACK window */
10940 +#define RXRPC_ACK_NOSPACE 5 /* packet discarded due to lack of buffer space */
10941 +#define RXRPC_ACK_PING 6 /* keep alive ACK */
10942 +#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */
10943 +#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */
10944 +#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */
10946 + u8 nAcks; /* number of ACKs */
10947 +#define RXRPC_MAXACKS 255
10949 + u8 acks[0]; /* list of ACK/NAKs */
10950 +#define RXRPC_ACK_TYPE_NACK 0
10951 +#define RXRPC_ACK_TYPE_ACK 1
10953 +} __attribute__((packed));
10955 +extern const char *rxrpc_acks[];
10957 +#endif /* _LINUX_RXRPC_PACKET_H */
10958 diff -urNp linux-5240/include/rxrpc/peer.h linux-5250/include/rxrpc/peer.h
10959 --- linux-5240/include/rxrpc/peer.h 1970-01-01 01:00:00.000000000 +0100
10960 +++ linux-5250/include/rxrpc/peer.h
10962 +/* peer.h: Rx RPC per-transport peer record
10964 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
10965 + * Written by David Howells (dhowells@redhat.com)
10967 + * This program is free software; you can redistribute it and/or
10968 + * modify it under the terms of the GNU General Public License
10969 + * as published by the Free Software Foundation; either version
10970 + * 2 of the License, or (at your option) any later version.
10973 +#ifndef _LINUX_RXRPC_PEER_H
10974 +#define _LINUX_RXRPC_PEER_H
10976 +#include <linux/wait.h>
10977 +#include <rxrpc/types.h>
10978 +#include <rxrpc/krxtimod.h>
10980 +struct rxrpc_peer_ops
10982 + /* peer record being added */
10983 + int (*adding)(struct rxrpc_peer *peer);
10985 + /* peer record being discarded from graveyard */
10986 + void (*discarding)(struct rxrpc_peer *peer);
10988 + /* change of epoch detected on connection */
10989 + void (*change_of_epoch)(struct rxrpc_connection *conn);
10992 +/*****************************************************************************/
10994 + * Rx RPC per-transport peer record
10995 + * - peers only retain a refcount on the transport when they are active
10996 + * - peers with refcount==0 are inactive and reside in the transport's graveyard
11001 + struct rxrpc_peer_ops *ops; /* operations on this peer */
11002 + struct rxrpc_transport *trans; /* owner transport */
11003 + struct rxrpc_timer timeout; /* timeout for grave destruction */
11004 + struct list_head link; /* link in transport's peer list */
11005 + struct list_head proc_link; /* link in /proc list */
11006 + rwlock_t conn_lock; /* lock for connections */
11007 + struct list_head conn_active; /* active connections to/from this peer */
11008 + struct list_head conn_graveyard; /* graveyard for inactive connections */
11009 + spinlock_t conn_gylock; /* lock for conn_graveyard */
11010 + wait_queue_head_t conn_gy_waitq; /* wait queue hit when graveyard is empty */
11011 + atomic_t conn_count; /* number of attached connections */
11012 + struct in_addr addr; /* remote address */
11013 + size_t if_mtu; /* interface MTU for this peer */
11014 + spinlock_t lock; /* access lock */
11016 + void *user; /* application layer data */
11018 + /* calculated RTT cache */
11019 +#define RXRPC_RTT_CACHE_SIZE 32
11020 + suseconds_t rtt; /* current RTT estimate (in uS) */
11021 + unsigned short rtt_point; /* next entry at which to insert */
11022 + unsigned short rtt_usage; /* amount of cache actually used */
11023 + suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
11027 +extern int rxrpc_peer_lookup(struct rxrpc_transport *trans,
11029 + struct rxrpc_peer **_peer);
11031 +static inline void rxrpc_get_peer(struct rxrpc_peer *peer)
11033 + if (atomic_read(&peer->usage)<0)
11035 + atomic_inc(&peer->usage);
11036 + //printk("rxrpc_get_peer(%p{u=%d})\n",peer,atomic_read(&peer->usage));
11039 +extern void rxrpc_put_peer(struct rxrpc_peer *peer);
11041 +#endif /* _LINUX_RXRPC_PEER_H */
11042 diff -urNp linux-5240/include/rxrpc/rxrpc.h linux-5250/include/rxrpc/rxrpc.h
11043 --- linux-5240/include/rxrpc/rxrpc.h 1970-01-01 01:00:00.000000000 +0100
11044 +++ linux-5250/include/rxrpc/rxrpc.h
11046 +/* rx.h: Rx RPC interface
11048 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
11049 + * Written by David Howells (dhowells@redhat.com)
11051 + * This program is free software; you can redistribute it and/or
11052 + * modify it under the terms of the GNU General Public License
11053 + * as published by the Free Software Foundation; either version
11054 + * 2 of the License, or (at your option) any later version.
11057 +#ifndef _LINUX_RXRPC_RXRPC_H
11058 +#define _LINUX_RXRPC_RXRPC_H
11062 +extern u32 rxrpc_epoch;
11064 +extern int rxrpc_ktrace;
11065 +extern int rxrpc_kdebug;
11066 +extern int rxrpc_kproto;
11067 +extern int rxrpc_knet;
11069 +extern int rxrpc_sysctl_init(void);
11070 +extern void rxrpc_sysctl_cleanup(void);
11072 +#endif /* __KERNEL__ */
11074 +#endif /* _LINUX_RXRPC_RXRPC_H */
11075 diff -urNp linux-5240/include/rxrpc/transport.h linux-5250/include/rxrpc/transport.h
11076 --- linux-5240/include/rxrpc/transport.h 1970-01-01 01:00:00.000000000 +0100
11077 +++ linux-5250/include/rxrpc/transport.h
11079 +/* transport.h: Rx transport management
11081 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
11082 + * Written by David Howells (dhowells@redhat.com)
11084 + * This program is free software; you can redistribute it and/or
11085 + * modify it under the terms of the GNU General Public License
11086 + * as published by the Free Software Foundation; either version
11087 + * 2 of the License, or (at your option) any later version.
11090 +#ifndef _LINUX_RXRPC_TRANSPORT_H
11091 +#define _LINUX_RXRPC_TRANSPORT_H
11093 +#include <rxrpc/types.h>
11094 +#include <rxrpc/krxiod.h>
11095 +#include <rxrpc/rxrpc.h>
11096 +#include <linux/skbuff.h>
11097 +#include <linux/rwsem.h>
11099 +typedef int (*rxrpc_newcall_fnx_t)(struct rxrpc_call *call);
11101 +extern wait_queue_head_t rxrpc_krxiod_wq;
11103 +/*****************************************************************************/
11105 + * Rx operation specification
11106 + * - tables of these must be sorted by op ID so that they can be binary-chop searched
11108 +struct rxrpc_operation
11110 + unsigned id; /* operation ID */
11111 + size_t asize; /* minimum size of argument block */
11112 + const char *name; /* name of operation */
11113 + void *user; /* initial user data */
11116 +/*****************************************************************************/
11118 + * Rx transport service record
11120 +struct rxrpc_service
11122 + struct list_head link; /* link in services list on transport */
11123 + struct module *owner; /* owner module */
11124 + rxrpc_newcall_fnx_t new_call; /* new call handler function */
11125 + const char *name; /* name of service */
11126 + unsigned short service_id; /* Rx service ID */
11127 + rxrpc_call_attn_func_t attn_func; /* call requires attention callback */
11128 + rxrpc_call_error_func_t error_func; /* call error callback */
11129 + rxrpc_call_aemap_func_t aemap_func; /* abort -> errno mapping callback */
11131 + const struct rxrpc_operation *ops_begin; /* beginning of operations table */
11132 + const struct rxrpc_operation *ops_end; /* end of operations table */
11135 +/*****************************************************************************/
11137 + * Rx transport endpoint record
11139 +struct rxrpc_transport
11142 + struct socket *socket; /* my UDP socket */
11143 + struct list_head services; /* services listening on this socket */
11144 + struct list_head link; /* link in transport list */
11145 + struct list_head proc_link; /* link in transport proc list */
11146 + struct list_head krxiodq_link; /* krxiod attention queue link */
11147 + spinlock_t lock; /* access lock */
11148 + struct list_head peer_active; /* active peers connected to over this socket */
11149 + struct list_head peer_graveyard; /* inactive peer list */
11150 + spinlock_t peer_gylock; /* peer graveyard lock */
11151 + wait_queue_head_t peer_gy_waitq; /* wait queue hit when peer graveyard is empty */
11152 + rwlock_t peer_lock; /* peer list access lock */
11153 + atomic_t peer_count; /* number of peers */
11154 + struct rxrpc_peer_ops *peer_ops; /* default peer operations */
11155 + unsigned short port; /* port upon which listening */
11156 + volatile char error_rcvd; /* T if received ICMP error outstanding */
11159 +extern struct list_head rxrpc_transports;
11161 +extern int rxrpc_create_transport(unsigned short port,
11162 + struct rxrpc_transport **_trans);
11164 +static inline void rxrpc_get_transport(struct rxrpc_transport *trans)
11166 + if (atomic_read(&trans->usage)<=0)
11168 + atomic_inc(&trans->usage);
11169 + //printk("rxrpc_get_transport(%p{u=%d})\n",trans,atomic_read(&trans->usage));
11172 +extern void rxrpc_put_transport(struct rxrpc_transport *trans);
11174 +extern int rxrpc_add_service(struct rxrpc_transport *trans,
11175 + struct rxrpc_service *srv);
11177 +extern void rxrpc_del_service(struct rxrpc_transport *trans,
11178 + struct rxrpc_service *srv);
11181 +extern int rxrpc_trans_add_connection(struct rxrpc_transport *trans,
11182 + struct rxrpc_connection *conn);
11185 +extern void rxrpc_trans_receive_packet(struct rxrpc_transport *trans);
11187 +extern int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
11188 + struct rxrpc_message *msg,
11191 +extern void rxrpc_clear_transport(struct rxrpc_transport *trans);
11193 +#endif /* _LINUX_RXRPC_TRANSPORT_H */
11194 diff -urNp linux-5240/include/rxrpc/types.h linux-5250/include/rxrpc/types.h
11195 --- linux-5240/include/rxrpc/types.h 1970-01-01 01:00:00.000000000 +0100
11196 +++ linux-5250/include/rxrpc/types.h
11198 +/* types.h: Rx types
11200 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
11201 + * Written by David Howells (dhowells@redhat.com)
11203 + * This program is free software; you can redistribute it and/or
11204 + * modify it under the terms of the GNU General Public License
11205 + * as published by the Free Software Foundation; either version
11206 + * 2 of the License, or (at your option) any later version.
11209 +#ifndef _LINUX_RXRPC_TYPES_H
11210 +#define _LINUX_RXRPC_TYPES_H
11212 +#include <linux/types.h>
11213 +#include <linux/list.h>
11214 +#include <linux/socket.h>
11215 +#include <linux/in.h>
11216 +#include <linux/spinlock.h>
11217 +#include <asm/atomic.h>
11219 +typedef unsigned rxrpc_seq_t; /* Rx message sequence number */
11220 +typedef unsigned rxrpc_serial_t; /* Rx message serial number */
11222 +struct rxrpc_call;
11223 +struct rxrpc_connection;
11224 +struct rxrpc_header;
11225 +struct rxrpc_message;
11226 +struct rxrpc_operation;
11227 +struct rxrpc_peer;
11228 +struct rxrpc_service;
11229 +typedef struct rxrpc_timer rxrpc_timer_t;
11230 +struct rxrpc_transport;
11232 +typedef void (*rxrpc_call_attn_func_t)(struct rxrpc_call *call);
11233 +typedef void (*rxrpc_call_error_func_t)(struct rxrpc_call *call);
11234 +typedef void (*rxrpc_call_aemap_func_t)(struct rxrpc_call *call);
11236 +#endif /* _LINUX_RXRPC_TYPES_H */
11237 diff -urNp linux-5240/net/Makefile linux-5250/net/Makefile
11238 --- linux-5240/net/Makefile
11239 +++ linux-5250/net/Makefile
11241 O_TARGET := network.o
11243 mod-subdirs := ipv4/netfilter ipv6/netfilter bridge/netfilter ipx irda \
11244 - bluetooth atm netlink sched core
11245 + bluetooth atm netlink sched core rxrpc
11246 export-objs := netsyms.o
11248 subdir-y := core ethernet
11249 @@ -46,6 +46,7 @@ subdir-$(CONFIG_ATM) += atm
11250 subdir-$(CONFIG_DECNET) += decnet
11251 subdir-$(CONFIG_ECONET) += econet
11252 subdir-$(CONFIG_VLAN_8021Q) += 8021q
11253 +subdir-$(CONFIG_AFS_FS) += rxrpc
11256 obj-y := socket.o $(join $(subdir-y), $(patsubst %,/%.o,$(notdir $(subdir-y))))
11257 diff -urNp linux-5240/net/rxrpc/call.c linux-5250/net/rxrpc/call.c
11258 --- linux-5240/net/rxrpc/call.c 1970-01-01 01:00:00.000000000 +0100
11259 +++ linux-5250/net/rxrpc/call.c
11261 +/* call.c: Rx call routines
11263 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
11264 + * Written by David Howells (dhowells@redhat.com)
11266 + * This program is free software; you can redistribute it and/or
11267 + * modify it under the terms of the GNU General Public License
11268 + * as published by the Free Software Foundation; either version
11269 + * 2 of the License, or (at your option) any later version.
11272 +#include <linux/sched.h>
11273 +#include <linux/slab.h>
11274 +#include <linux/module.h>
11275 +#include <rxrpc/rxrpc.h>
11276 +#include <rxrpc/transport.h>
11277 +#include <rxrpc/peer.h>
11278 +#include <rxrpc/connection.h>
11279 +#include <rxrpc/call.h>
11280 +#include <rxrpc/message.h>
11281 +#include "internal.h"
11283 +__RXACCT_DECL(atomic_t rxrpc_call_count);
11284 +__RXACCT_DECL(atomic_t rxrpc_message_count);
11286 +LIST_HEAD(rxrpc_calls);
11287 +DECLARE_RWSEM(rxrpc_calls_sem);
11289 +unsigned rxrpc_call_rcv_timeout = 30;
11290 +unsigned rxrpc_call_acks_timeout = 30;
11291 +unsigned rxrpc_call_dfr_ack_timeout = 5;
11292 +unsigned short rxrpc_call_max_resend = 10;
11294 +const char *rxrpc_call_states[] = {
11300 + "SRVR_SND_REPLY",
11301 + "SRVR_RCV_FINAL_ACK",
11303 + "CLNT_RCV_REPLY",
11307 +const char *rxrpc_call_error_states[] = {
11315 +const char *rxrpc_pkts[] = {
11316 + "?00", "data", "ack", "busy", "abort", "ackall", "chall", "resp", "debug",
11317 + "?09", "?10", "?11", "?12", "?13", "?14", "?15"
11320 +const char *rxrpc_acks[] = {
11321 + "---", "REQ", "DUP", "SEQ", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", "-?-"
11324 +static const char _acktype[] = "NA-";
11326 +static void rxrpc_call_receive_packet(struct rxrpc_call *call);
11327 +static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc_message *msg);
11328 +static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_message *msg);
11329 +static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, rxrpc_seq_t higest);
11330 +static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest);
11331 +static int __rxrpc_call_read_data(struct rxrpc_call *call);
11333 +static int rxrpc_call_record_ACK(struct rxrpc_call *call,
11334 + struct rxrpc_message *msg,
11337 +#define _state(call) \
11338 + _debug("[[[ state %s ]]]",rxrpc_call_states[call->app_call_state]);
11340 +static void rxrpc_call_default_attn_func(struct rxrpc_call *call)
11342 + wake_up(&call->waitq);
11345 +static void rxrpc_call_default_error_func(struct rxrpc_call *call)
11347 + wake_up(&call->waitq);
11350 +static void rxrpc_call_default_aemap_func(struct rxrpc_call *call)
11352 + switch (call->app_err_state) {
11353 + case RXRPC_ESTATE_LOCAL_ABORT:
11354 + call->app_abort_code = -call->app_errno;
11355 + case RXRPC_ESTATE_PEER_ABORT:
11356 + call->app_errno = -ECONNABORTED;
11362 +static void __rxrpc_call_acks_timeout(unsigned long _call)
11364 + struct rxrpc_call *call = (struct rxrpc_call *) _call;
11366 + _debug("ACKS TIMEOUT %05lu",jiffies - call->cjif);
11368 + call->flags |= RXRPC_CALL_ACKS_TIMO;
11369 + rxrpc_krxiod_queue_call(call);
11372 +static void __rxrpc_call_rcv_timeout(unsigned long _call)
11374 + struct rxrpc_call *call = (struct rxrpc_call *) _call;
11376 + _debug("RCV TIMEOUT %05lu",jiffies - call->cjif);
11378 + call->flags |= RXRPC_CALL_RCV_TIMO;
11379 + rxrpc_krxiod_queue_call(call);
11382 +static void __rxrpc_call_ackr_timeout(unsigned long _call)
11384 + struct rxrpc_call *call = (struct rxrpc_call *) _call;
11386 + _debug("ACKR TIMEOUT %05lu",jiffies - call->cjif);
11388 + call->flags |= RXRPC_CALL_ACKR_TIMO;
11389 + rxrpc_krxiod_queue_call(call);
11392 +/*****************************************************************************/
11394 + * create a new call record
11396 +static inline int __rxrpc_create_call(struct rxrpc_connection *conn,
11397 + struct rxrpc_call **_call)
11399 + struct rxrpc_call *call;
11401 + _enter("%p",conn);
11403 + /* allocate and initialise a call record */
11404 + call = (struct rxrpc_call *) get_zeroed_page(GFP_KERNEL);
11406 + _leave(" ENOMEM");
11410 + atomic_set(&call->usage,1);
11412 + init_waitqueue_head(&call->waitq);
11413 + spin_lock_init(&call->lock);
11414 + INIT_LIST_HEAD(&call->link);
11415 + INIT_LIST_HEAD(&call->acks_pendq);
11416 + INIT_LIST_HEAD(&call->rcv_receiveq);
11417 + INIT_LIST_HEAD(&call->rcv_krxiodq_lk);
11418 + INIT_LIST_HEAD(&call->app_readyq);
11419 + INIT_LIST_HEAD(&call->app_unreadyq);
11420 + INIT_LIST_HEAD(&call->app_link);
11421 + INIT_LIST_HEAD(&call->app_attn_link);
11423 + init_timer(&call->acks_timeout);
11424 + call->acks_timeout.data = (unsigned long) call;
11425 + call->acks_timeout.function = __rxrpc_call_acks_timeout;
11427 + init_timer(&call->rcv_timeout);
11428 + call->rcv_timeout.data = (unsigned long) call;
11429 + call->rcv_timeout.function = __rxrpc_call_rcv_timeout;
11431 + init_timer(&call->ackr_dfr_timo);
11432 + call->ackr_dfr_timo.data = (unsigned long) call;
11433 + call->ackr_dfr_timo.function = __rxrpc_call_ackr_timeout;
11435 + call->conn = conn;
11436 + call->ackr_win_bot = 1;
11437 + call->ackr_win_top = call->ackr_win_bot + RXRPC_CALL_ACK_WINDOW_SIZE - 1;
11438 + call->ackr_prev_seq = 0;
11439 + call->app_mark = RXRPC_APP_MARK_EOF;
11440 + call->app_attn_func = rxrpc_call_default_attn_func;
11441 + call->app_error_func = rxrpc_call_default_error_func;
11442 + call->app_aemap_func = rxrpc_call_default_aemap_func;
11443 + call->app_scr_alloc = call->app_scratch;
11445 + call->cjif = jiffies;
11447 + _leave(" = 0 (%p)",call);
11452 +} /* end __rxrpc_create_call() */
11454 +/*****************************************************************************/
11456 + * create a new call record for outgoing calls
11458 +int rxrpc_create_call(struct rxrpc_connection *conn,
11459 + rxrpc_call_attn_func_t attn,
11460 + rxrpc_call_error_func_t error,
11461 + rxrpc_call_aemap_func_t aemap,
11462 + struct rxrpc_call **_call)
11464 + DECLARE_WAITQUEUE(myself,current);
11466 + struct rxrpc_call *call;
11467 + int ret, cix, loop;
11469 + _enter("%p",conn);
11471 + /* allocate and initialise a call record */
11472 + ret = __rxrpc_create_call(conn,&call);
11474 + _leave(" = %d",ret);
11478 + call->app_call_state = RXRPC_CSTATE_CLNT_SND_ARGS;
11479 + if (attn) call->app_attn_func = attn;
11480 + if (error) call->app_error_func = error;
11481 + if (aemap) call->app_aemap_func = aemap;
11485 + spin_lock(&conn->lock);
11486 + set_current_state(TASK_INTERRUPTIBLE);
11487 + add_wait_queue(&conn->chanwait,&myself);
11490 + /* try to find an unused channel */
11491 + for (cix=0; cix<4; cix++)
11492 + if (!conn->channels[cix])
11493 + goto obtained_chan;
11495 + /* no free channels - wait for one to become available */
11497 + if (signal_pending(current))
11498 + goto error_unwait;
11500 + spin_unlock(&conn->lock);
11503 + set_current_state(TASK_INTERRUPTIBLE);
11505 + spin_lock(&conn->lock);
11508 + /* got a channel - now attach to the connection */
11510 + remove_wait_queue(&conn->chanwait,&myself);
11511 + set_current_state(TASK_RUNNING);
11513 + /* concoct a unique call number */
11515 + call->call_id = htonl(++conn->call_counter);
11516 + for (loop=0; loop<4; loop++)
11517 + if (conn->channels[loop] && conn->channels[loop]->call_id==call->call_id)
11518 + goto next_callid;
11520 + rxrpc_get_connection(conn);
11521 + conn->channels[cix] = call; /* assign _after_ done callid check loop */
11522 + conn->atime = xtime;
11523 + call->chan_ix = htonl(cix);
11525 + spin_unlock(&conn->lock);
11527 + down_write(&rxrpc_calls_sem);
11528 + list_add_tail(&call->call_link,&rxrpc_calls);
11529 + up_write(&rxrpc_calls_sem);
11531 + __RXACCT(atomic_inc(&rxrpc_call_count));
11534 + _leave(" = 0 (call=%p cix=%u)",call,cix);
11538 + remove_wait_queue(&conn->chanwait,&myself);
11539 + set_current_state(TASK_RUNNING);
11540 + spin_unlock(&conn->lock);
11542 + free_page((unsigned long)call);
11543 + _leave(" = %d",ret);
11546 +} /* end rxrpc_create_call() */
11548 +/*****************************************************************************/
11550 + * create a new call record for incoming calls
11552 +int rxrpc_incoming_call(struct rxrpc_connection *conn,
11553 + struct rxrpc_message *msg,
11554 + struct rxrpc_call **_call)
11556 + struct rxrpc_call *call;
11560 + cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
11562 + _enter("%p,%u,%u",conn,ntohl(msg->hdr.callNumber),cix);
11564 + /* allocate and initialise a call record */
11565 + ret = __rxrpc_create_call(conn,&call);
11567 + _leave(" = %d",ret);
11571 + call->pkt_rcv_count = 1;
11572 + call->app_call_state = RXRPC_CSTATE_SRVR_RCV_OPID;
11573 + call->app_mark = sizeof(u32);
11577 + /* attach to the connection */
11579 + call->chan_ix = htonl(cix);
11580 + call->call_id = msg->hdr.callNumber;
11582 + spin_lock(&conn->lock);
11584 + if (!conn->channels[cix]) {
11585 + conn->channels[cix] = call;
11586 + rxrpc_get_connection(conn);
11590 + spin_unlock(&conn->lock);
11592 + if (ret<0) free_page((unsigned long)call);
11594 + _leave(" = %p",call);
11597 + down_write(&rxrpc_calls_sem);
11598 + list_add_tail(&call->call_link,&rxrpc_calls);
11599 + up_write(&rxrpc_calls_sem);
11600 + __RXACCT(atomic_inc(&rxrpc_call_count));
11605 +} /* end rxrpc_incoming_call() */
11607 +/*****************************************************************************/
11609 + * free a call record
11611 +void rxrpc_put_call(struct rxrpc_call *call)
11613 + struct rxrpc_connection *conn = call->conn;
11614 + struct rxrpc_message *msg;
11616 + _enter("%p{u=%d}",call,atomic_read(&call->usage));
11618 + /* sanity check */
11619 + if (atomic_read(&call->usage)<=0)
11622 + /* to prevent a race, the decrement and the de-list must be effectively atomic */
11623 + spin_lock(&conn->lock);
11624 + if (likely(!atomic_dec_and_test(&call->usage))) {
11625 + spin_unlock(&conn->lock);
11630 + conn->channels[ntohl(call->chan_ix)] = NULL;
11632 + spin_unlock(&conn->lock);
11634 + wake_up(&conn->chanwait);
11636 + rxrpc_put_connection(conn);
11638 + /* clear the timers and dequeue from krxiod */
11639 + del_timer_sync(&call->acks_timeout);
11640 + del_timer_sync(&call->rcv_timeout);
11641 + del_timer_sync(&call->ackr_dfr_timo);
11643 + rxrpc_krxiod_dequeue_call(call);
11645 + /* clean up the contents of the struct */
11646 + if (call->snd_nextmsg)
11647 + rxrpc_put_message(call->snd_nextmsg);
11649 + if (call->snd_ping)
11650 + rxrpc_put_message(call->snd_ping);
11652 + while (!list_empty(&call->acks_pendq)) {
11653 + msg = list_entry(call->acks_pendq.next,struct rxrpc_message,link);
11654 + list_del(&msg->link);
11655 + rxrpc_put_message(msg);
11658 + while (!list_empty(&call->rcv_receiveq)) {
11659 + msg = list_entry(call->rcv_receiveq.next,struct rxrpc_message,link);
11660 + list_del(&msg->link);
11661 + rxrpc_put_message(msg);
11664 + while (!list_empty(&call->app_readyq)) {
11665 + msg = list_entry(call->app_readyq.next,struct rxrpc_message,link);
11666 + list_del(&msg->link);
11667 + rxrpc_put_message(msg);
11670 + while (!list_empty(&call->app_unreadyq)) {
11671 + msg = list_entry(call->app_unreadyq.next,struct rxrpc_message,link);
11672 + list_del(&msg->link);
11673 + rxrpc_put_message(msg);
11676 + if (call->owner) __MOD_DEC_USE_COUNT(call->owner);
11678 + down_write(&rxrpc_calls_sem);
11679 + list_del(&call->call_link);
11680 + up_write(&rxrpc_calls_sem);
11682 + __RXACCT(atomic_dec(&rxrpc_call_count));
11683 + free_page((unsigned long)call);
11685 + _leave(" [destroyed]");
11686 +} /* end rxrpc_put_call() */
11688 +/*****************************************************************************/
11690 + * actually generate a normal ACK
11692 +static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call, rxrpc_seq_t seq)
11694 + struct rxrpc_message *msg;
11695 + struct iovec diov[3];
11699 + /* ACKs default to DELAY */
11700 + if (!call->ackr.reason)
11701 + call->ackr.reason = RXRPC_ACK_DELAY;
11703 + _proto("Rx %05lu Sending ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
11704 + jiffies - call->cjif,
11705 + ntohs(call->ackr.maxSkew),
11706 + ntohl(call->ackr.firstPacket),
11707 + ntohl(call->ackr.previousPacket),
11708 + ntohl(call->ackr.serial),
11709 + rxrpc_acks[call->ackr.reason],
11710 + call->ackr.nAcks);
11712 + aux[0] = htonl(call->conn->peer->if_mtu); /* interface MTU */
11713 + aux[1] = htonl(1444); /* max MTU */
11714 + aux[2] = htonl(16); /* rwind */
11715 + aux[3] = htonl(4); /* max packets */
11717 + diov[0].iov_len = sizeof(struct rxrpc_ackpacket);
11718 + diov[0].iov_base = &call->ackr;
11719 + diov[1].iov_len = (call->ackr_pend_cnt+3);
11720 + diov[1].iov_base = call->ackr_array;
11721 + diov[2].iov_len = sizeof(aux);
11722 + diov[2].iov_base = &aux;
11724 + /* build and send the message */
11725 + ret = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_ACK,3,diov,GFP_KERNEL,&msg);
11730 + msg->hdr.seq = htonl(seq);
11731 + msg->hdr.flags |= RXRPC_SLOW_START_OK;
11733 + ret = rxrpc_conn_sendmsg(call->conn,msg);
11734 + rxrpc_put_message(msg);
11737 + call->pkt_snd_count++;
11739 + /* count how many actual ACKs there were at the front */
11740 + for (delta=0; delta<call->ackr_pend_cnt; delta++)
11741 + if (call->ackr_array[delta]!=RXRPC_ACK_TYPE_ACK)
11744 + call->ackr_pend_cnt -= delta; /* all ACK'd to this point */
11746 + /* crank the ACK window around */
11748 + /* un-ACK'd window */
11750 + else if (delta < RXRPC_CALL_ACK_WINDOW_SIZE) {
11751 + /* partially ACK'd window
11752 + * - shuffle down to avoid losing out-of-sequence packets
11754 + call->ackr_win_bot += delta;
11755 + call->ackr_win_top += delta;
11757 + memmove(&call->ackr_array[0],
11758 + &call->ackr_array[delta],
11759 + call->ackr_pend_cnt);
11761 + memset(&call->ackr_array[call->ackr_pend_cnt],
11762 + RXRPC_ACK_TYPE_NACK,
11763 + sizeof(call->ackr_array) - call->ackr_pend_cnt);
11766 + /* fully ACK'd window
11767 + * - just clear the whole thing
11769 + memset(&call->ackr_array,RXRPC_ACK_TYPE_NACK,sizeof(call->ackr_array));
11772 + /* clear this ACK */
11773 + memset(&call->ackr,0,sizeof(call->ackr));
11776 + if (!call->app_call_state) printk("___ STATE 0 ___\n");
11778 +} /* end __rxrpc_call_gen_normal_ACK() */
11780 +/*****************************************************************************/
11782 + * note the reception of a packet in the call's ACK records and generate an appropriate ACK packet
11784 + * - returns 0 if packet should be processed, 1 if packet should be ignored and -ve on an error
11786 +static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
11787 + struct rxrpc_header *hdr,
11788 + struct rxrpc_ackpacket *ack)
11790 + struct rxrpc_message *msg;
11793 + int ret = 0, err;
11794 + u8 special_ACK, do_ACK, force;
11796 + _enter("%p,%p { seq=%d tp=%d fl=%02x }",call,hdr,ntohl(hdr->seq),hdr->type,hdr->flags);
11798 + seq = ntohl(hdr->seq);
11799 + offset = seq - call->ackr_win_bot;
11800 + do_ACK = RXRPC_ACK_DELAY;
11802 + force = (seq==1);
11804 + if (call->ackr_high_seq < seq)
11805 + call->ackr_high_seq = seq;
11807 + /* deal with generation of obvious special ACKs first */
11808 + if (ack && ack->reason==RXRPC_ACK_PING) {
11809 + special_ACK = RXRPC_ACK_PING_RESPONSE;
11814 + if (seq < call->ackr_win_bot) {
11815 + special_ACK = RXRPC_ACK_DUPLICATE;
11820 + if (seq >= call->ackr_win_top) {
11821 + special_ACK = RXRPC_ACK_EXCEEDS_WINDOW;
11826 + if (call->ackr_array[offset] != RXRPC_ACK_TYPE_NACK) {
11827 + special_ACK = RXRPC_ACK_DUPLICATE;
11832 + /* okay... it's a normal data packet inside the ACK window */
11833 + call->ackr_array[offset] = RXRPC_ACK_TYPE_ACK;
11835 + if (offset<call->ackr_pend_cnt) {
11837 + else if (offset>call->ackr_pend_cnt) {
11838 + do_ACK = RXRPC_ACK_OUT_OF_SEQUENCE;
11839 + call->ackr_pend_cnt = offset;
11843 + if (hdr->flags & RXRPC_REQUEST_ACK) {
11844 + do_ACK = RXRPC_ACK_REQUESTED;
11847 + /* generate an ACK on the final packet of a reply just received */
11848 + if (hdr->flags & RXRPC_LAST_PACKET) {
11849 + if (call->conn->out_clientflag)
11852 + else if (!(hdr->flags & RXRPC_MORE_PACKETS)) {
11853 + do_ACK = RXRPC_ACK_REQUESTED;
11856 + /* re-ACK packets previously received out-of-order */
11857 + for (offset++; offset<RXRPC_CALL_ACK_WINDOW_SIZE; offset++)
11858 + if (call->ackr_array[offset]!=RXRPC_ACK_TYPE_ACK)
11861 + call->ackr_pend_cnt = offset;
11863 + /* generate an ACK if we fill up the window */
11864 + if (call->ackr_pend_cnt >= RXRPC_CALL_ACK_WINDOW_SIZE)
11868 + _debug("%05lu ACKs pend=%u norm=%s special=%s%s",
11869 + jiffies - call->cjif,
11870 + call->ackr_pend_cnt,rxrpc_acks[do_ACK],rxrpc_acks[special_ACK],
11871 + force ? " immediate" :
11872 + do_ACK==RXRPC_ACK_REQUESTED ? " merge-req" :
11873 + hdr->flags & RXRPC_LAST_PACKET ? " finalise" :
11877 + /* send any pending normal ACKs if need be */
11878 + if (call->ackr_pend_cnt>0) {
11879 + /* fill out the appropriate form */
11880 + call->ackr.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
11881 + call->ackr.maxSkew = htons(min(call->ackr_high_seq - seq,65535U));
11882 + call->ackr.firstPacket = htonl(call->ackr_win_bot);
11883 + call->ackr.previousPacket = call->ackr_prev_seq;
11884 + call->ackr.serial = hdr->serial;
11885 + call->ackr.nAcks = call->ackr_pend_cnt;
11887 + if (do_ACK==RXRPC_ACK_REQUESTED)
11888 + call->ackr.reason = do_ACK;
11890 + /* generate the ACK immediately if necessary */
11891 + if (special_ACK || force) {
11892 + err = __rxrpc_call_gen_normal_ACK(call,do_ACK==RXRPC_ACK_DELAY ? 0 : seq);
11900 + if (call->ackr.reason==RXRPC_ACK_REQUESTED)
11901 + call->ackr_dfr_seq = seq;
11903 + /* start the ACK timer if not running if there are any pending deferred ACKs */
11904 + if (call->ackr_pend_cnt>0 &&
11905 + call->ackr.reason!=RXRPC_ACK_REQUESTED &&
11906 + !timer_pending(&call->ackr_dfr_timo)
11908 + unsigned long timo;
11910 + timo = rxrpc_call_dfr_ack_timeout + jiffies;
11912 + _debug("START ACKR TIMER for cj=%lu",timo-call->cjif);
11914 + spin_lock(&call->lock);
11915 + mod_timer(&call->ackr_dfr_timo,timo);
11916 + spin_unlock(&call->lock);
11918 + else if ((call->ackr_pend_cnt==0 || call->ackr.reason==RXRPC_ACK_REQUESTED) &&
11919 + timer_pending(&call->ackr_dfr_timo)
11921 + /* stop timer if no pending ACKs */
11922 + _debug("CLEAR ACKR TIMER");
11923 + del_timer_sync(&call->ackr_dfr_timo);
11926 + /* send a special ACK if one is required */
11927 + if (special_ACK) {
11928 + struct rxrpc_ackpacket ack;
11929 + struct iovec diov[2];
11930 + u8 acks[1] = { RXRPC_ACK_TYPE_ACK };
11932 + /* fill out the appropriate form */
11933 + ack.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
11934 + ack.maxSkew = htons(min(call->ackr_high_seq - seq,65535U));
11935 + ack.firstPacket = htonl(call->ackr_win_bot);
11936 + ack.previousPacket = call->ackr_prev_seq;
11937 + ack.serial = hdr->serial;
11938 + ack.reason = special_ACK;
11940 + //ack.nAcks = special_ACK==RXRPC_ACK_OUT_OF_SEQUENCE ? 0 : hdr->seq ? 1 : 0;
11942 + _proto("Rx Sending s-ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
11943 + ntohs(ack.maxSkew),ntohl(ack.firstPacket),ntohl(ack.previousPacket),
11944 + ntohl(ack.serial),rxrpc_acks[ack.reason],ack.nAcks);
11946 + diov[0].iov_len = sizeof(struct rxrpc_ackpacket);
11947 + diov[0].iov_base = &ack;
11948 + diov[1].iov_len = sizeof(acks);
11949 + diov[1].iov_base = acks;
11951 + /* build and send the message */
11952 + err = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_ACK,
11953 + hdr->seq ? 2 : 1,diov,
11962 + msg->hdr.seq = htonl(seq);
11963 + msg->hdr.flags |= RXRPC_SLOW_START_OK;
11965 + err = rxrpc_conn_sendmsg(call->conn,msg);
11966 + rxrpc_put_message(msg);
11971 + call->pkt_snd_count++;
11976 + call->ackr_prev_seq = hdr->seq;
11978 + _leave(" = %d",ret);
11980 +} /* end rxrpc_call_generate_ACK() */
11982 +/*****************************************************************************/
11984 + * handle work to be done on a call
11985 + * - includes packet reception and timeout processing
11987 +void rxrpc_call_do_stuff(struct rxrpc_call *call)
11989 + _enter("%p{flags=%lx}",call,call->flags);
11991 + /* handle packet reception */
11992 + if (call->flags & RXRPC_CALL_RCV_PKT) {
11993 + _debug("- receive packet");
11994 + call->flags &= ~RXRPC_CALL_RCV_PKT;
11995 + rxrpc_call_receive_packet(call);
11998 + /* handle overdue ACKs */
11999 + if (call->flags & RXRPC_CALL_ACKS_TIMO) {
12000 + _debug("- overdue ACK timeout");
12001 + call->flags &= ~RXRPC_CALL_ACKS_TIMO;
12002 + rxrpc_call_resend(call,call->snd_seq_count);
12005 + /* handle lack of reception */
12006 + if (call->flags & RXRPC_CALL_RCV_TIMO) {
12007 + _debug("- reception timeout");
12008 + call->flags &= ~RXRPC_CALL_RCV_TIMO;
12009 + rxrpc_call_abort(call,-EIO);
12012 + /* handle deferred ACKs */
12013 + if (call->flags & RXRPC_CALL_ACKR_TIMO ||
12014 + (call->ackr.nAcks>0 && call->ackr.reason==RXRPC_ACK_REQUESTED)
12016 + _debug("- deferred ACK timeout: cj=%05lu r=%s n=%u",
12017 + jiffies - call->cjif,
12018 + rxrpc_acks[call->ackr.reason],
12019 + call->ackr.nAcks);
12021 + call->flags &= ~RXRPC_CALL_ACKR_TIMO;
12023 + if (call->ackr.nAcks>0 && call->app_call_state!=RXRPC_CSTATE_ERROR) {
12024 + /* generate ACK */
12025 + __rxrpc_call_gen_normal_ACK(call,call->ackr_dfr_seq);
12026 + call->ackr_dfr_seq = 0;
12032 +} /* end rxrpc_call_do_stuff() */
12034 +/*****************************************************************************/
12036 + * send an abort message at call or connection level
12037 + * - must be called with call->lock held
12038 + * - the supplied error code is sent as the packet data
12040 +static int __rxrpc_call_abort(struct rxrpc_call *call, int errno)
12042 + struct rxrpc_connection *conn = call->conn;
12043 + struct rxrpc_message *msg;
12044 + struct iovec diov[1];
12048 + _enter("%p{%08x},%p{%d},%d",conn,ntohl(conn->conn_id),call,ntohl(call->call_id),errno);
12050 + /* if this call is already aborted, then just wake up any waiters */
12051 + if (call->app_call_state==RXRPC_CSTATE_ERROR) {
12052 + spin_unlock(&call->lock);
12053 + call->app_error_func(call);
12058 + rxrpc_get_call(call);
12060 + /* change the state _with_ the lock still held */
12061 + call->app_call_state = RXRPC_CSTATE_ERROR;
12062 + call->app_err_state = RXRPC_ESTATE_LOCAL_ABORT;
12063 + call->app_errno = errno;
12064 + call->app_mark = RXRPC_APP_MARK_EOF;
12065 + call->app_read_buf = NULL;
12066 + call->app_async_read = 0;
12070 + /* ask the app to translate the error code */
12071 + call->app_aemap_func(call);
12073 + spin_unlock(&call->lock);
12075 + /* flush any outstanding ACKs */
12076 + del_timer_sync(&call->acks_timeout);
12077 + del_timer_sync(&call->rcv_timeout);
12078 + del_timer_sync(&call->ackr_dfr_timo);
12080 + if (rxrpc_call_is_ack_pending(call))
12081 + __rxrpc_call_gen_normal_ACK(call,0);
12083 + /* send the abort packet only if we actually traded some other packets */
12085 + if (call->pkt_snd_count || call->pkt_rcv_count) {
12086 + /* actually send the abort */
12087 + _proto("Rx Sending Call ABORT { data=%d }",call->app_abort_code);
12089 + _error = htonl(call->app_abort_code);
12091 + diov[0].iov_len = sizeof(_error);
12092 + diov[0].iov_base = &_error;
12094 + ret = rxrpc_conn_newmsg(conn,call,RXRPC_PACKET_TYPE_ABORT,1,diov,GFP_KERNEL,&msg);
12096 + ret = rxrpc_conn_sendmsg(conn,msg);
12097 + rxrpc_put_message(msg);
12101 + /* tell the app layer to let go */
12102 + call->app_error_func(call);
12104 + rxrpc_put_call(call);
12106 + _leave(" = %d",ret);
12109 +} /* end __rxrpc_call_abort() */
12111 +/*****************************************************************************/
12113 + * send an abort message at call or connection level
12114 + * - the supplied error code is sent as the packet data
12116 +int rxrpc_call_abort(struct rxrpc_call *call, int error)
12118 + spin_lock(&call->lock);
12120 + return __rxrpc_call_abort(call,error);
12122 +} /* end rxrpc_call_abort() */
12124 +/*****************************************************************************/
12126 + * process packets waiting for this call
12128 +static void rxrpc_call_receive_packet(struct rxrpc_call *call)
12130 + struct rxrpc_message *msg;
12131 + struct list_head *_p;
12134 + _enter("%p",call);
12136 + rxrpc_get_call(call); /* must not go away too soon if aborted by app-layer */
12138 + while (!list_empty(&call->rcv_receiveq)) {
12139 + /* try to get next packet */
12141 + spin_lock(&call->lock);
12142 + if (!list_empty(&call->rcv_receiveq)) {
12143 + _p = call->rcv_receiveq.next;
12144 + list_del_init(_p);
12146 + spin_unlock(&call->lock);
12150 + msg = list_entry(_p,struct rxrpc_message,link);
12152 + _proto("Rx %05lu Received %s packet (%%%u,#%u,%c%c%c%c%c)",
12153 + jiffies - call->cjif,
12154 + rxrpc_pkts[msg->hdr.type],
12155 + ntohl(msg->hdr.serial),
12157 + msg->hdr.flags & RXRPC_JUMBO_PACKET ? 'j' : '-',
12158 + msg->hdr.flags & RXRPC_MORE_PACKETS ? 'm' : '-',
12159 + msg->hdr.flags & RXRPC_LAST_PACKET ? 'l' : '-',
12160 + msg->hdr.flags & RXRPC_REQUEST_ACK ? 'r' : '-',
12161 + msg->hdr.flags & RXRPC_CLIENT_INITIATED ? 'C' : 'S'
12164 + switch (msg->hdr.type) {
12165 + /* deal with data packets */
12166 + case RXRPC_PACKET_TYPE_DATA:
12167 + /* ACK the packet if necessary */
12168 + switch (rxrpc_call_generate_ACK(call,&msg->hdr,NULL)) {
12169 + case 0: /* useful packet */
12170 + rxrpc_call_receive_data_packet(call,msg);
12172 + case 1: /* duplicate or out-of-window packet */
12175 + rxrpc_put_message(msg);
12180 + /* deal with ACK packets */
12181 + case RXRPC_PACKET_TYPE_ACK:
12182 + rxrpc_call_receive_ack_packet(call,msg);
12185 + /* deal with abort packets */
12186 + case RXRPC_PACKET_TYPE_ABORT:
12188 + if (skb_copy_bits(msg->pkt,msg->offset,&data32,sizeof(data32))<0) {
12189 + printk("Rx Received short ABORT packet\n");
12192 + data32 = ntohl(data32);
12195 + _proto("Rx Received Call ABORT { data=%d }",data32);
12197 + spin_lock(&call->lock);
12198 + call->app_call_state = RXRPC_CSTATE_ERROR;
12199 + call->app_err_state = RXRPC_ESTATE_PEER_ABORT;
12200 + call->app_abort_code = data32;
12201 + call->app_errno = -ECONNABORTED;
12202 + call->app_mark = RXRPC_APP_MARK_EOF;
12203 + call->app_read_buf = NULL;
12204 + call->app_async_read = 0;
12206 + /* ask the app to translate the error code */
12207 + call->app_aemap_func(call);
12209 + spin_unlock(&call->lock);
12210 + call->app_error_func(call);
12214 + /* deal with other packet types */
12215 + _proto("Rx Unsupported packet type %u (#%u)",msg->hdr.type,msg->seq);
12219 + rxrpc_put_message(msg);
12223 + rxrpc_put_call(call);
12225 +} /* end rxrpc_call_receive_packet() */
12227 +/*****************************************************************************/
12229 + * process next data packet
12230 + * - as the next data packet arrives:
12231 + * - it is queued on app_readyq _if_ it is the next one expected (app_ready_seq+1)
12232 + * - it is queued on app_unreadyq _if_ it is not the next one expected
12233 + * - if a packet placed on app_readyq completely fills a hole leading up to the first packet
12234 + * on app_unreadyq, then packets now in sequence are transferred to app_readyq
12235 + * - the application layer can only see packets on app_readyq (app_ready_qty bytes)
12236 + * - the application layer is prodded every time a new packet arrives
12238 +static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc_message *msg)
12240 + const struct rxrpc_operation *optbl, *op;
12241 + struct rxrpc_message *pmsg;
12242 + struct list_head *_p;
12243 + int ret, lo, hi, rmtimo;
12246 + _enter("%p{%u},%p{%u}",call,ntohl(call->call_id),msg,msg->seq);
12248 + rxrpc_get_message(msg);
12250 + /* add to the unready queue if we'd have to create a hole in the ready queue otherwise */
12251 + if (msg->seq != call->app_ready_seq+1) {
12252 + _debug("Call add packet %d to unreadyq",msg->seq);
12254 + /* insert in seq order */
12255 + list_for_each(_p,&call->app_unreadyq) {
12256 + pmsg = list_entry(_p,struct rxrpc_message,link);
12257 + if (pmsg->seq>msg->seq)
12261 + list_add_tail(&msg->link,_p);
12263 + _leave(" [unreadyq]");
12267 + /* next in sequence - simply append into the call's ready queue */
12268 + _debug("Call add packet %d to readyq (+%d => %d bytes)",
12269 + msg->seq,msg->dsize,call->app_ready_qty);
12271 + spin_lock(&call->lock);
12272 + call->app_ready_seq = msg->seq;
12273 + call->app_ready_qty += msg->dsize;
12274 + list_add_tail(&msg->link,&call->app_readyq);
12276 + /* move unready packets to the readyq if we got rid of a hole */
12277 + while (!list_empty(&call->app_unreadyq)) {
12278 + pmsg = list_entry(call->app_unreadyq.next,struct rxrpc_message,link);
12280 + if (pmsg->seq != call->app_ready_seq+1)
12283 + /* next in sequence - just move list-to-list */
12284 + _debug("Call transfer packet %d to readyq (+%d => %d bytes)",
12285 + pmsg->seq,pmsg->dsize,call->app_ready_qty);
12287 + call->app_ready_seq = pmsg->seq;
12288 + call->app_ready_qty += pmsg->dsize;
12289 + list_del_init(&pmsg->link);
12290 + list_add_tail(&pmsg->link,&call->app_readyq);
12293 + /* see if we've got the last packet yet */
12294 + if (!list_empty(&call->app_readyq)) {
12295 + pmsg = list_entry(call->app_readyq.prev,struct rxrpc_message,link);
12296 + if (pmsg->hdr.flags & RXRPC_LAST_PACKET) {
12297 + call->app_last_rcv = 1;
12298 + _debug("Last packet on readyq");
12302 + switch (call->app_call_state) {
12303 + /* do nothing if call already aborted */
12304 + case RXRPC_CSTATE_ERROR:
12305 + spin_unlock(&call->lock);
12306 + _leave(" [error]");
12309 + /* extract the operation ID from an incoming call if that's not yet been done */
12310 + case RXRPC_CSTATE_SRVR_RCV_OPID:
12311 + spin_unlock(&call->lock);
12313 + /* handle as yet insufficient data for the operation ID */
12314 + if (call->app_ready_qty<4) {
12315 + if (call->app_last_rcv)
12316 + rxrpc_call_abort(call,-EINVAL); /* trouble - last packet seen */
12322 + /* pull the operation ID out of the buffer */
12323 + ret = rxrpc_call_read_data(call,&opid,sizeof(opid),0);
12325 + printk("Unexpected error from read-data: %d\n",ret);
12326 + if (call->app_call_state!=RXRPC_CSTATE_ERROR)
12327 + rxrpc_call_abort(call,ret);
12331 + call->app_opcode = ntohl(opid);
12333 + /* locate the operation in the available ops table */
12334 + optbl = call->conn->service->ops_begin;
12336 + hi = call->conn->service->ops_end - optbl;
12339 + int mid = (hi+lo) / 2;
12340 + op = &optbl[mid];
12341 + if (call->app_opcode==op->id)
12343 + if (call->app_opcode>op->id)
12349 + /* search failed */
12350 + kproto("Rx Client requested operation %d from %s service",
12351 + call->app_opcode,call->conn->service->name);
12352 + rxrpc_call_abort(call,-EINVAL);
12353 + _leave(" [inval]");
12357 + _proto("Rx Client requested operation %s from %s service",
12358 + op->name,call->conn->service->name);
12360 + /* we're now waiting for the argument block (unless the call was aborted) */
12361 + spin_lock(&call->lock);
12362 + if (call->app_call_state==RXRPC_CSTATE_SRVR_RCV_OPID ||
12363 + call->app_call_state==RXRPC_CSTATE_SRVR_SND_REPLY) {
12364 + if (!call->app_last_rcv)
12365 + call->app_call_state = RXRPC_CSTATE_SRVR_RCV_ARGS;
12366 + else if (call->app_ready_qty>0)
12367 + call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS;
12369 + call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
12370 + call->app_mark = op->asize;
12371 + call->app_user = op->user;
12373 + spin_unlock(&call->lock);
12378 + case RXRPC_CSTATE_SRVR_RCV_ARGS:
12379 + /* change state if just received last packet of arg block */
12380 + if (call->app_last_rcv)
12381 + call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS;
12382 + spin_unlock(&call->lock);
12387 + case RXRPC_CSTATE_CLNT_RCV_REPLY:
12388 + /* change state if just received last packet of reply block */
12390 + if (call->app_last_rcv) {
12391 + call->app_call_state = RXRPC_CSTATE_CLNT_GOT_REPLY;
12394 + spin_unlock(&call->lock);
12397 + del_timer_sync(&call->acks_timeout);
12398 + del_timer_sync(&call->rcv_timeout);
12399 + del_timer_sync(&call->ackr_dfr_timo);
12406 + /* deal with data reception in an unexpected state */
12407 + printk("Unexpected state [[[ %u ]]]\n",call->app_call_state);
12408 + __rxrpc_call_abort(call,-EBADMSG);
12413 + if (call->app_call_state==RXRPC_CSTATE_CLNT_RCV_REPLY && call->app_last_rcv)
12416 + /* otherwise just invoke the data function whenever we can satisfy its desire for more
12419 + _proto("Rx Received Op Data: st=%u qty=%u mk=%u%s",
12420 + call->app_call_state,call->app_ready_qty,call->app_mark,
12421 + call->app_last_rcv ? " last-rcvd" : "");
12423 + spin_lock(&call->lock);
12425 + ret = __rxrpc_call_read_data(call);
12428 + spin_unlock(&call->lock);
12429 + call->app_attn_func(call);
12432 + spin_unlock(&call->lock);
12434 + case -ECONNABORTED:
12435 + spin_unlock(&call->lock);
12438 + __rxrpc_call_abort(call,ret);
12446 +} /* end rxrpc_call_receive_data_packet() */
12448 +/*****************************************************************************/
12450 + * received an ACK packet
12452 +static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_message *msg)
12454 + struct rxrpc_ackpacket ack;
12455 + rxrpc_serial_t serial;
12459 + _enter("%p{%u},%p{%u}",call,ntohl(call->call_id),msg,msg->seq);
12461 + /* extract the basic ACK record */
12462 + if (skb_copy_bits(msg->pkt,msg->offset,&ack,sizeof(ack))<0) {
12463 + printk("Rx Received short ACK packet\n");
12466 + msg->offset += sizeof(ack);
12468 + serial = ack.serial;
12469 + seq = ntohl(ack.firstPacket);
12471 + _proto("Rx Received ACK %%%d { b=%hu m=%hu f=%u p=%u s=%u r=%s n=%u }",
12472 + ntohl(msg->hdr.serial),
12473 + ntohs(ack.bufferSpace),
12474 + ntohs(ack.maxSkew),
12476 + ntohl(ack.previousPacket),
12478 + rxrpc_acks[ack.reason],
12482 + /* check the other side isn't ACK'ing a sequence number I haven't sent yet */
12483 + if (ack.nAcks>0 && (seq > call->snd_seq_count || seq+ack.nAcks-1 > call->snd_seq_count)) {
12484 + printk("Received ACK (#%u-#%u) for unsent packet\n",seq,seq+ack.nAcks-1);
12485 + rxrpc_call_abort(call,-EINVAL);
12490 + /* deal with RTT calculation */
12492 + struct rxrpc_message *rttmsg;
12494 + /* find the prompting packet */
12495 + spin_lock(&call->lock);
12496 + if (call->snd_ping && call->snd_ping->hdr.serial==serial) {
12497 + /* it was a ping packet */
12498 + rttmsg = call->snd_ping;
12499 + call->snd_ping = NULL;
12500 + spin_unlock(&call->lock);
12503 + rttmsg->rttdone = 1;
12504 + rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
12505 + rxrpc_put_message(rttmsg);
12509 + struct list_head *_p;
12511 + /* it ought to be a data packet - look in the pending ACK list */
12512 + list_for_each(_p,&call->acks_pendq) {
12513 + rttmsg = list_entry(_p,struct rxrpc_message,link);
12514 + if (rttmsg->hdr.serial==serial) {
12515 + if (rttmsg->rttdone)
12516 + break; /* never do RTT twice without resending */
12518 + rttmsg->rttdone = 1;
12519 + rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
12523 + spin_unlock(&call->lock);
12527 + switch (ack.reason) {
12528 + /* deal with negative/positive acknowledgement of data packets */
12529 + case RXRPC_ACK_REQUESTED:
12530 + case RXRPC_ACK_DELAY:
12531 + case RXRPC_ACK_IDLE:
12532 + rxrpc_call_definitively_ACK(call,seq-1);
12534 + case RXRPC_ACK_DUPLICATE:
12535 + case RXRPC_ACK_OUT_OF_SEQUENCE:
12536 + case RXRPC_ACK_EXCEEDS_WINDOW:
12537 + call->snd_resend_cnt = 0;
12538 + ret = rxrpc_call_record_ACK(call,msg,seq,ack.nAcks);
12540 + rxrpc_call_abort(call,ret);
12543 + /* respond to ping packets immediately */
12544 + case RXRPC_ACK_PING:
12545 + rxrpc_call_generate_ACK(call,&msg->hdr,&ack);
12548 + /* only record RTT on ping response packets */
12549 + case RXRPC_ACK_PING_RESPONSE:
12550 + if (call->snd_ping) {
12551 + struct rxrpc_message *rttmsg;
12553 + /* only do RTT stuff if the response matches the retained ping */
12555 + spin_lock(&call->lock);
12556 + if (call->snd_ping && call->snd_ping->hdr.serial==ack.serial) {
12557 + rttmsg = call->snd_ping;
12558 + call->snd_ping = NULL;
12560 + spin_unlock(&call->lock);
12563 + rttmsg->rttdone = 1;
12564 + rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
12565 + rxrpc_put_message(rttmsg);
12571 + printk("Unsupported ACK reason %u\n",ack.reason);
12576 +} /* end rxrpc_call_receive_ack_packet() */
12578 +/*****************************************************************************/
12580 + * record definitive ACKs for all messages up to and including the one with the 'highest' seq
12582 +static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, rxrpc_seq_t highest)
12584 + struct rxrpc_message *msg;
12585 + int now_complete;
12587 + _enter("%p{ads=%u},%u",call,call->acks_dftv_seq,highest);
12589 + while (call->acks_dftv_seq<highest) {
12590 + call->acks_dftv_seq++;
12592 + _proto("Definitive ACK on packet #%u",call->acks_dftv_seq);
12594 + /* discard those at front of queue until message with highest ACK is found */
12595 + spin_lock(&call->lock);
12597 + if (!list_empty(&call->acks_pendq)) {
12598 + msg = list_entry(call->acks_pendq.next,struct rxrpc_message,link);
12599 + list_del_init(&msg->link); /* dequeue */
12600 + if (msg->state==RXRPC_MSG_SENT)
12601 + call->acks_pend_cnt--;
12603 + spin_unlock(&call->lock);
12605 + /* insanity check */
12607 + panic("%s(): acks_pendq unexpectedly empty\n",__FUNCTION__);
12609 + if (msg->seq!=call->acks_dftv_seq)
12610 + panic("%s(): Packet #%u expected at front of acks_pendq (#%u found)\n",
12611 + __FUNCTION__,call->acks_dftv_seq,msg->seq);
12613 + /* discard the message */
12614 + msg->state = RXRPC_MSG_DONE;
12615 + rxrpc_put_message(msg);
12618 + /* if all sent packets are definitively ACK'd then prod any sleepers just in case */
12619 + now_complete = 0;
12620 + spin_lock(&call->lock);
12621 + if (call->acks_dftv_seq==call->snd_seq_count) {
12622 + if (call->app_call_state!=RXRPC_CSTATE_COMPLETE) {
12623 + call->app_call_state = RXRPC_CSTATE_COMPLETE;
12625 + now_complete = 1;
12628 + spin_unlock(&call->lock);
12630 + if (now_complete) {
12631 + del_timer_sync(&call->acks_timeout);
12632 + del_timer_sync(&call->rcv_timeout);
12633 + del_timer_sync(&call->ackr_dfr_timo);
12634 + call->app_attn_func(call);
12638 +} /* end rxrpc_call_definitively_ACK() */
12640 +/*****************************************************************************/
12642 + * record the specified amount of ACKs/NAKs
12644 +static int rxrpc_call_record_ACK(struct rxrpc_call *call,
12645 + struct rxrpc_message *msg,
12649 + struct rxrpc_message *dmsg;
12650 + struct list_head *_p;
12651 + rxrpc_seq_t highest;
12654 + char resend, now_complete;
12657 + _enter("%p{apc=%u ads=%u},%p,%u,%u",
12658 + call,call->acks_pend_cnt,call->acks_dftv_seq,msg,seq,count);
12660 + /* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order ACKs) */
12661 + if (seq<=call->acks_dftv_seq) {
12662 + unsigned delta = call->acks_dftv_seq - seq;
12664 + if (count<=delta) {
12665 + _leave(" = 0 [all definitively ACK'd]");
12671 + msg->offset += delta;
12674 + highest = seq + count - 1;
12676 + while (count>0) {
12677 + /* extract up to 16 ACK slots at a time */
12678 + chunk = min(count,sizeof(acks));
12681 + memset(acks,2,sizeof(acks));
12683 + if (skb_copy_bits(msg->pkt,msg->offset,&acks,chunk)<0) {
12684 + printk("Rx Received short ACK packet\n");
12685 + _leave(" = -EINVAL");
12688 + msg->offset += chunk;
12690 + /* check that the ACK set is valid */
12691 + for (ix=0; ix<chunk; ix++) {
12692 + switch (acks[ix]) {
12693 + case RXRPC_ACK_TYPE_ACK:
12695 + case RXRPC_ACK_TYPE_NACK:
12699 + printk("Rx Received unsupported ACK state %u\n",acks[ix]);
12700 + _leave(" = -EINVAL");
12705 + _proto("Rx ACK of packets #%u-#%u [%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)",
12707 + _acktype[acks[0x0]],
12708 + _acktype[acks[0x1]],
12709 + _acktype[acks[0x2]],
12710 + _acktype[acks[0x3]],
12711 + _acktype[acks[0x4]],
12712 + _acktype[acks[0x5]],
12713 + _acktype[acks[0x6]],
12714 + _acktype[acks[0x7]],
12715 + _acktype[acks[0x8]],
12716 + _acktype[acks[0x9]],
12717 + _acktype[acks[0xA]],
12718 + _acktype[acks[0xB]],
12719 + _acktype[acks[0xC]],
12720 + _acktype[acks[0xD]],
12721 + _acktype[acks[0xE]],
12722 + _acktype[acks[0xF]],
12723 + call->acks_pend_cnt
12726 + /* mark the packets in the ACK queue as being provisionally ACK'd */
12728 + spin_lock(&call->lock);
12730 + /* find the first packet ACK'd/NAK'd here */
12731 + list_for_each(_p,&call->acks_pendq) {
12732 + dmsg = list_entry(_p,struct rxrpc_message,link);
12733 + if (dmsg->seq==seq)
12734 + goto found_first;
12735 + _debug("- %u: skipping #%u",ix,dmsg->seq);
12741 + _debug("- %u: processing #%u (%c) apc=%u",
12742 + ix,dmsg->seq,_acktype[acks[ix]],call->acks_pend_cnt);
12744 + if (acks[ix]==RXRPC_ACK_TYPE_ACK) {
12745 + if (dmsg->state==RXRPC_MSG_SENT) call->acks_pend_cnt--;
12746 + dmsg->state = RXRPC_MSG_ACKED;
12749 + if (dmsg->state==RXRPC_MSG_ACKED) call->acks_pend_cnt++;
12750 + dmsg->state = RXRPC_MSG_SENT;
12755 + _p = dmsg->link.next;
12756 + dmsg = list_entry(_p,struct rxrpc_message,link);
12757 + } while(ix<chunk && _p!=&call->acks_pendq && dmsg->seq==seq);
12762 + spin_unlock(&call->lock);
12766 + rxrpc_call_resend(call,highest);
12768 + /* if all packets are provisionally ACK'd, then wake up anyone who's waiting for that */
12769 + now_complete = 0;
12770 + spin_lock(&call->lock);
12771 + if (call->acks_pend_cnt==0) {
12772 + if (call->app_call_state==RXRPC_CSTATE_SRVR_RCV_FINAL_ACK) {
12773 + call->app_call_state = RXRPC_CSTATE_COMPLETE;
12776 + now_complete = 1;
12778 + spin_unlock(&call->lock);
12780 + if (now_complete) {
12781 + _debug("- wake up waiters");
12782 + del_timer_sync(&call->acks_timeout);
12783 + del_timer_sync(&call->rcv_timeout);
12784 + del_timer_sync(&call->ackr_dfr_timo);
12785 + call->app_attn_func(call);
12788 + _leave(" = 0 (apc=%u)",call->acks_pend_cnt);
12792 + panic("%s(): acks_pendq in bad state (packet #%u absent)\n",__FUNCTION__,seq);
12794 +} /* end rxrpc_call_record_ACK() */
12796 +/*****************************************************************************/
12798 + * transfer data from the ready packet queue to the asynchronous read buffer
12799 + * - since this func is the only one going to look at packets queued on app_readyq, we don't need
12800 + * a lock to modify or access them, only to modify the queue pointers
12801 + * - called with call->lock held
12802 + * - the buffer must be in kernel space
12804 + * 0 if buffer filled
12805 + * -EAGAIN if buffer not filled and more data to come
12806 + * -EBADMSG if last packet received and insufficient data left
12807 + * -ECONNABORTED if the call is in an error state
12809 +static int __rxrpc_call_read_data(struct rxrpc_call *call)
12811 + struct rxrpc_message *msg;
12815 + _enter("%p{as=%d buf=%p qty=%u/%u}",
12816 + call,call->app_async_read,call->app_read_buf,call->app_ready_qty,call->app_mark);
12818 + /* check the state */
12819 + switch (call->app_call_state) {
12820 + case RXRPC_CSTATE_SRVR_RCV_ARGS:
12821 + case RXRPC_CSTATE_CLNT_RCV_REPLY:
12822 + if (call->app_last_rcv) {
12823 + printk("%s(%p,%p,%d): Inconsistent call state (%s, last pkt)",
12824 + __FUNCTION__,call,call->app_read_buf,call->app_mark,
12825 + rxrpc_call_states[call->app_call_state]);
12830 + case RXRPC_CSTATE_SRVR_RCV_OPID:
12831 + case RXRPC_CSTATE_SRVR_GOT_ARGS:
12832 + case RXRPC_CSTATE_CLNT_GOT_REPLY:
12835 + case RXRPC_CSTATE_SRVR_SND_REPLY:
12836 + if (!call->app_last_rcv) {
12837 + printk("%s(%p,%p,%d): Inconsistent call state (%s, not last pkt)",
12838 + __FUNCTION__,call,call->app_read_buf,call->app_mark,
12839 + rxrpc_call_states[call->app_call_state]);
12842 + _debug("Trying to read data from call in SND_REPLY state");
12845 + case RXRPC_CSTATE_ERROR:
12846 + _leave(" = -ECONNABORTED");
12847 + return -ECONNABORTED;
12850 + printk("reading in unexpected state [[[ %u ]]]\n",call->app_call_state);
12854 + /* handle the case of not having an async buffer */
12855 + if (!call->app_async_read) {
12856 + if (call->app_mark==RXRPC_APP_MARK_EOF) {
12857 + ret = call->app_last_rcv ? 0 : -EAGAIN;
12860 + if (call->app_mark >= call->app_ready_qty) {
12861 + call->app_mark = RXRPC_APP_MARK_EOF;
12865 + ret = call->app_last_rcv ? -EBADMSG : -EAGAIN;
12869 + _leave(" = %d [no buf]",ret);
12873 + while (!list_empty(&call->app_readyq) && call->app_mark>0) {
12874 + msg = list_entry(call->app_readyq.next,struct rxrpc_message,link);
12876 + /* drag as much data as we need out of this packet */
12877 + qty = min(call->app_mark,msg->dsize);
12879 + _debug("reading %u from skb=%p off=%lu",qty,msg->pkt,msg->offset);
12881 + if (call->app_read_buf)
12882 + if (skb_copy_bits(msg->pkt,msg->offset,call->app_read_buf,qty)<0)
12883 + panic("%s: Failed to copy data from packet: (%p,%p,%d)",
12884 + __FUNCTION__,call,call->app_read_buf,qty);
12886 + /* if that packet is now empty, discard it */
12887 + call->app_ready_qty -= qty;
12888 + msg->dsize -= qty;
12890 + if (msg->dsize==0) {
12891 + list_del_init(&msg->link);
12892 + rxrpc_put_message(msg);
12895 + msg->offset += qty;
12898 + call->app_mark -= qty;
12899 + if (call->app_read_buf) call->app_read_buf += qty;
12902 + if (call->app_mark==0) {
12903 + call->app_async_read = 0;
12904 + call->app_mark = RXRPC_APP_MARK_EOF;
12905 + call->app_read_buf = NULL;
12907 + /* adjust the state if used up all packets */
12908 + if (list_empty(&call->app_readyq) && call->app_last_rcv) {
12909 + switch (call->app_call_state) {
12910 + case RXRPC_CSTATE_SRVR_RCV_OPID:
12911 + call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
12912 + call->app_mark = RXRPC_APP_MARK_EOF;
12914 + del_timer_sync(&call->rcv_timeout);
12916 + case RXRPC_CSTATE_SRVR_GOT_ARGS:
12917 + call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
12919 + del_timer_sync(&call->rcv_timeout);
12922 + call->app_call_state = RXRPC_CSTATE_COMPLETE;
12924 + del_timer_sync(&call->acks_timeout);
12925 + del_timer_sync(&call->ackr_dfr_timo);
12926 + del_timer_sync(&call->rcv_timeout);
12935 + if (call->app_last_rcv) {
12936 + _debug("Insufficient data (%u/%u)",call->app_ready_qty,call->app_mark);
12937 + call->app_async_read = 0;
12938 + call->app_mark = RXRPC_APP_MARK_EOF;
12939 + call->app_read_buf = NULL;
12941 + _leave(" = -EBADMSG");
12945 + _leave(" = -EAGAIN");
12947 +} /* end __rxrpc_call_read_data() */
12949 +/*****************************************************************************/
12951 + * attempt to read the specified amount of data from the call's ready queue into the buffer
12953 + * - since this func is the only one going to look at packets queued on app_readyq, we don't need
12954 + * a lock to modify or access them, only to modify the queue pointers
12955 + * - if the buffer pointer is NULL, then data is merely drained, not copied
12956 + * - if flags&RXRPC_CALL_READ_BLOCK, then the function will wait until there is enough data or an
12957 + * error will be generated
12958 + * - note that the caller must have added the calling task to the call's wait queue beforehand
12959 + * - if flags&RXRPC_CALL_READ_ALL, then an error will be generated if this function doesn't read
12960 + * all available data
12962 +int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int flags)
12966 + _enter("%p{arq=%u},%p,%d,%x",call,call->app_ready_qty,buffer,size,flags);
12968 + spin_lock(&call->lock);
12970 + if (unlikely(!!call->app_read_buf)) {
12971 + spin_unlock(&call->lock);
12972 + _leave(" = -EBUSY");
12976 + call->app_mark = size;
12977 + call->app_read_buf = buffer;
12978 + call->app_async_read = 1;
12979 + call->app_read_count++;
12981 + /* read as much data as possible */
12982 + ret = __rxrpc_call_read_data(call);
12985 + if (flags&RXRPC_CALL_READ_ALL && (!call->app_last_rcv || call->app_ready_qty>0)) {
12986 + _leave(" = -EBADMSG");
12987 + __rxrpc_call_abort(call,-EBADMSG);
12991 + spin_unlock(&call->lock);
12992 + call->app_attn_func(call);
12996 + case -ECONNABORTED:
12997 + spin_unlock(&call->lock);
12998 + _leave(" = %d [aborted]",ret);
13002 + __rxrpc_call_abort(call,ret);
13003 + _leave(" = %d",ret);
13007 + spin_unlock(&call->lock);
13009 + if (!(flags&RXRPC_CALL_READ_BLOCK)) {
13010 + _leave(" = -EAGAIN");
13014 + /* wait for the data to arrive */
13015 + _debug("blocking for data arrival");
13018 + set_current_state(TASK_INTERRUPTIBLE);
13019 + if (!call->app_async_read || signal_pending(current))
13023 + set_current_state(TASK_RUNNING);
13025 + if (signal_pending(current)) {
13026 + _leave(" = -EINTR");
13030 + if (call->app_call_state==RXRPC_CSTATE_ERROR) {
13031 + _leave(" = -ECONNABORTED");
13032 + return -ECONNABORTED;
13039 +} /* end rxrpc_call_read_data() */
13041 +/*****************************************************************************/
13043 + * write data to a call
13044 + * - the data may not be sent immediately if it doesn't fill a buffer
13045 + * - if we can't queue all the data for buffering now, siov[] will have been adjusted to take
13046 + * account of what has been sent
13048 +int rxrpc_call_write_data(struct rxrpc_call *call,
13050 + struct iovec siov[],
13054 + size_t *size_sent)
13056 + struct rxrpc_message *msg;
13057 + struct iovec *sptr;
13058 + size_t space, size, chunk, tmp;
13062 + _enter("%p,%u,%p,%02x,%x,%d,%p",call,sioc,siov,rxhdr_flags,alloc_flags,dup_data,size_sent);
13068 + /* can't send more if we've sent last packet from this end */
13069 + switch (call->app_call_state) {
13070 + case RXRPC_CSTATE_SRVR_SND_REPLY:
13071 + case RXRPC_CSTATE_CLNT_SND_ARGS:
13073 + case RXRPC_CSTATE_ERROR:
13074 + ret = call->app_errno;
13079 + /* calculate how much data we've been given */
13081 + for (; sioc>0; sptr++, sioc--) {
13082 + if (!sptr->iov_len) continue;
13084 + if (!sptr->iov_base)
13087 + size += sptr->iov_len;
13090 + _debug("- size=%u mtu=%u",size,call->conn->mtu_size);
13093 + /* make sure there's a message under construction */
13094 + if (!call->snd_nextmsg) {
13095 + /* no - allocate a message with no data yet attached */
13096 + ret = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_DATA,
13097 + 0,NULL,alloc_flags,&call->snd_nextmsg);
13100 + _debug("- allocated new message [ds=%u]",call->snd_nextmsg->dsize);
13103 + msg = call->snd_nextmsg;
13104 + msg->hdr.flags |= rxhdr_flags;
13106 + /* deal with zero-length terminal packet */
13108 + if (rxhdr_flags & RXRPC_LAST_PACKET) {
13109 + ret = rxrpc_call_flush(call);
13116 + /* work out how much space current packet has available */
13117 + space = call->conn->mtu_size - msg->dsize;
13118 + chunk = min(space,size);
13120 + _debug("- [before] space=%u chunk=%u",space,chunk);
13122 + while (!siov->iov_len)
13125 + /* if we are going to have to duplicate the data then coalesce it too */
13127 + /* don't allocate more than 1 page at a time */
13128 + if (chunk>PAGE_SIZE)
13129 + chunk = PAGE_SIZE;
13131 + /* allocate a data buffer and attach to the message */
13132 + buf = kmalloc(chunk,alloc_flags);
13133 + if (unlikely(!buf)) {
13134 + if (msg->dsize==sizeof(struct rxrpc_header)) {
13135 + /* discard an empty msg and wind back the seq counter */
13136 + rxrpc_put_message(msg);
13137 + call->snd_nextmsg = NULL;
13138 + call->snd_seq_count--;
13145 + tmp = msg->dcount++;
13146 + set_bit(tmp,&msg->dfree);
13147 + msg->data[tmp].iov_base = buf;
13148 + msg->data[tmp].iov_len = chunk;
13149 + msg->dsize += chunk;
13150 + *size_sent += chunk;
13153 + /* load the buffer with data */
13154 + while (chunk>0) {
13155 + tmp = min(chunk,siov->iov_len);
13156 + memcpy(buf,siov->iov_base,tmp);
13158 + siov->iov_base += tmp;
13159 + siov->iov_len -= tmp;
13160 + if (!siov->iov_len)
13166 + /* we want to attach the supplied buffers directly */
13167 + while (chunk>0 && msg->dcount<RXRPC_MSG_MAX_IOCS) {
13168 + tmp = msg->dcount++;
13169 + msg->data[tmp].iov_base = siov->iov_base;
13170 + msg->data[tmp].iov_len = siov->iov_len;
13171 + msg->dsize += siov->iov_len;
13172 + *size_sent += siov->iov_len;
13173 + size -= siov->iov_len;
13174 + chunk -= siov->iov_len;
13179 + _debug("- [loaded] chunk=%u size=%u",chunk,size);
13181 + /* dispatch the message when full, final or requesting ACK */
13182 + if (msg->dsize>=call->conn->mtu_size || rxhdr_flags) {
13183 + ret = rxrpc_call_flush(call);
13192 + _leave(" = %d (%d queued, %d rem)",ret,*size_sent,size);
13195 +} /* end rxrpc_call_write_data() */
13197 +/*****************************************************************************/
13199 + * flush outstanding packets to the network
13201 +int rxrpc_call_flush(struct rxrpc_call *call)
13203 + struct rxrpc_message *msg;
13206 + _enter("%p",call);
13208 + rxrpc_get_call(call);
13210 + /* if there's a packet under construction, then dispatch it now */
13211 + if (call->snd_nextmsg) {
13212 + msg = call->snd_nextmsg;
13213 + call->snd_nextmsg = NULL;
13215 + if (msg->hdr.flags & RXRPC_LAST_PACKET) {
13216 + msg->hdr.flags &= ~RXRPC_MORE_PACKETS;
13217 + msg->hdr.flags |= RXRPC_REQUEST_ACK;
13220 + msg->hdr.flags |= RXRPC_MORE_PACKETS;
13223 + _proto("Sending DATA message { ds=%u dc=%u df=%02lu }",
13224 + msg->dsize,msg->dcount,msg->dfree);
13226 + /* queue and adjust call state */
13227 + spin_lock(&call->lock);
13228 + list_add_tail(&msg->link,&call->acks_pendq);
13230 + /* decide what to do depending on current state and if this is the last packet */
13232 + switch (call->app_call_state) {
13233 + case RXRPC_CSTATE_SRVR_SND_REPLY:
13234 + if (msg->hdr.flags & RXRPC_LAST_PACKET) {
13235 + call->app_call_state = RXRPC_CSTATE_SRVR_RCV_FINAL_ACK;
13240 + case RXRPC_CSTATE_CLNT_SND_ARGS:
13241 + if (msg->hdr.flags & RXRPC_LAST_PACKET) {
13242 + call->app_call_state = RXRPC_CSTATE_CLNT_RCV_REPLY;
13247 + case RXRPC_CSTATE_ERROR:
13248 + ret = call->app_errno;
13250 + spin_unlock(&call->lock);
13254 + call->acks_pend_cnt++;
13256 + mod_timer(&call->acks_timeout,jiffies + rxrpc_call_acks_timeout);
13258 + spin_unlock(&call->lock);
13260 + ret = rxrpc_conn_sendmsg(call->conn,msg);
13262 + call->pkt_snd_count++;
13266 + rxrpc_put_call(call);
13268 + _leave(" = %d",ret);
13271 +} /* end rxrpc_call_flush() */
13273 +/*****************************************************************************/
13275 + * resend NAK'd or unacknowledged packets up to the highest one specified
13277 +static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
13279 + struct rxrpc_message *msg;
13280 + struct list_head *_p;
13281 + rxrpc_seq_t seq = 0;
13283 + _enter("%p,%u",call,highest);
13285 + _proto("Rx Resend required");
13287 + /* handle too many resends */
13288 + if (call->snd_resend_cnt>=rxrpc_call_max_resend) {
13289 + _debug("Aborting due to too many resends (rcv=%d)",call->pkt_rcv_count);
13290 + rxrpc_call_abort(call,call->pkt_rcv_count>0?-EIO:-ETIMEDOUT);
13295 + spin_lock(&call->lock);
13296 + call->snd_resend_cnt++;
13298 + /* determine the next packet we might need to ACK */
13299 + if (seq<=call->acks_dftv_seq)
13300 + seq = call->acks_dftv_seq;
13306 + /* look for the packet in the pending-ACK queue */
13307 + list_for_each(_p,&call->acks_pendq) {
13308 + msg = list_entry(_p,struct rxrpc_message,link);
13309 + if (msg->seq==seq)
13313 + panic("%s(%p,%d): Inconsistent pending-ACK queue (ds=%u sc=%u sq=%u)\n",
13314 + __FUNCTION__,call,highest,call->acks_dftv_seq,call->snd_seq_count,seq);
13317 + if (msg->state!=RXRPC_MSG_SENT)
13318 + continue; /* only un-ACK'd packets */
13320 + rxrpc_get_message(msg);
13321 + spin_unlock(&call->lock);
13323 + /* send each message again (and ignore any errors we might incur) */
13324 + _proto("Resending DATA message { ds=%u dc=%u df=%02lu }",
13325 + msg->dsize,msg->dcount,msg->dfree);
13327 + if (rxrpc_conn_sendmsg(call->conn,msg)==0)
13328 + call->pkt_snd_count++;
13330 + rxrpc_put_message(msg);
13332 + spin_lock(&call->lock);
13335 + /* reset the timeout */
13336 + mod_timer(&call->acks_timeout,jiffies + rxrpc_call_acks_timeout);
13338 + spin_unlock(&call->lock);
13341 +} /* end rxrpc_call_resend() */
13343 +/*****************************************************************************/
13345 + * handle an ICMP error being applied to a call
13347 +void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno)
13349 + _enter("%p{%u},%d",call,ntohl(call->call_id),errno);
13351 + /* if this call is already aborted, then just wake up any waiters */
13352 + if (call->app_call_state==RXRPC_CSTATE_ERROR) {
13353 + call->app_error_func(call);
13356 + /* tell the app layer what happened */
13357 + spin_lock(&call->lock);
13358 + call->app_call_state = RXRPC_CSTATE_ERROR;
13361 + call->app_err_state = RXRPC_ESTATE_LOCAL_ERROR;
13363 + call->app_err_state = RXRPC_ESTATE_REMOTE_ERROR;
13364 + call->app_errno = errno;
13365 + call->app_mark = RXRPC_APP_MARK_EOF;
13366 + call->app_read_buf = NULL;
13367 + call->app_async_read = 0;
13369 + /* map the error */
13370 + call->app_aemap_func(call);
13372 + del_timer_sync(&call->acks_timeout);
13373 + del_timer_sync(&call->rcv_timeout);
13374 + del_timer_sync(&call->ackr_dfr_timo);
13376 + spin_unlock(&call->lock);
13378 + call->app_error_func(call);
13382 +} /* end rxrpc_call_handle_error() */
13383 diff -urNp linux-5240/net/rxrpc/connection.c linux-5250/net/rxrpc/connection.c
13384 --- linux-5240/net/rxrpc/connection.c 1970-01-01 01:00:00.000000000 +0100
13385 +++ linux-5250/net/rxrpc/connection.c
13387 +/* connection.c: Rx connection routines
13389 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
13390 + * Written by David Howells (dhowells@redhat.com)
13392 + * This program is free software; you can redistribute it and/or
13393 + * modify it under the terms of the GNU General Public License
13394 + * as published by the Free Software Foundation; either version
13395 + * 2 of the License, or (at your option) any later version.
13398 +#include <linux/sched.h>
13399 +#include <linux/slab.h>
13400 +#include <linux/module.h>
13401 +#include <rxrpc/rxrpc.h>
13402 +#include <rxrpc/transport.h>
13403 +#include <rxrpc/peer.h>
13404 +#include <rxrpc/connection.h>
13405 +#include <rxrpc/call.h>
13406 +#include <rxrpc/message.h>
13407 +#include <linux/udp.h>
13408 +#include <linux/ip.h>
13409 +#include <net/sock.h>
13410 +#include <asm/uaccess.h>
13411 +#include "internal.h"
13413 +__RXACCT_DECL(atomic_t rxrpc_connection_count);
13415 +LIST_HEAD(rxrpc_conns);
13416 +DECLARE_RWSEM(rxrpc_conns_sem);
13418 +static void __rxrpc_conn_timeout(rxrpc_timer_t *timer)
13420 + struct rxrpc_connection *conn = list_entry(timer,struct rxrpc_connection,timeout);
13422 + _debug("Rx CONN TIMEOUT [%p{u=%d}]",conn,atomic_read(&conn->usage));
13424 + rxrpc_conn_do_timeout(conn);
13427 +static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = {
13428 + timed_out: __rxrpc_conn_timeout,
13431 +/*****************************************************************************/
13433 + * create a new connection record
13435 +static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
13436 + struct rxrpc_connection **_conn)
13438 + struct rxrpc_connection *conn;
13440 + _enter("%p",peer);
13442 + /* allocate and initialise a connection record */
13443 + conn = kmalloc(sizeof(struct rxrpc_connection),GFP_KERNEL);
13445 + _leave(" = -ENOMEM");
13449 + memset(conn,0,sizeof(struct rxrpc_connection));
13450 + atomic_set(&conn->usage,1);
13452 + INIT_LIST_HEAD(&conn->link);
13453 + init_waitqueue_head(&conn->chanwait);
13454 + spin_lock_init(&conn->lock);
13455 + rxrpc_timer_init(&conn->timeout,&rxrpc_conn_timer_ops);
13457 + conn->atime = xtime;
13458 + conn->mtu_size = 1024;
13459 + conn->peer = peer;
13460 + conn->trans = peer->trans;
13462 + __RXACCT(atomic_inc(&rxrpc_connection_count));
13464 + _leave(" = 0 (%p)",conn);
13467 +} /* end __rxrpc_create_connection() */
13469 +/*****************************************************************************/
13471 + * create a new connection record for outgoing connections
13473 +int rxrpc_create_connection(struct rxrpc_transport *trans,
13476 + unsigned short service_id,
13478 + struct rxrpc_connection **_conn)
13480 + struct rxrpc_connection *conn;
13481 + struct rxrpc_peer *peer;
13484 + _enter("%p{%hu},%u,%hu",trans,trans->port,ntohs(port),service_id);
13486 + /* get a peer record */
13487 + ret = rxrpc_peer_lookup(trans,addr,&peer);
13489 + _leave(" = %d",ret);
13493 + /* allocate and initialise a connection record */
13494 + ret = __rxrpc_create_connection(peer,&conn);
13496 + rxrpc_put_peer(peer);
13497 + _leave(" = %d",ret);
13501 + /* fill in the specific bits */
13502 + conn->addr.sin_family = AF_INET;
13503 + conn->addr.sin_port = port;
13504 + conn->addr.sin_addr.s_addr = addr;
13506 + conn->in_epoch = rxrpc_epoch;
13507 + conn->out_epoch = rxrpc_epoch;
13508 + conn->in_clientflag = 0;
13509 + conn->out_clientflag = RXRPC_CLIENT_INITIATED;
13510 + conn->conn_id = htonl((unsigned) conn & RXRPC_CIDMASK);
13511 + conn->service_id = htons(service_id);
13513 + /* attach to peer */
13514 + conn->peer = peer;
13516 + write_lock(&peer->conn_lock);
13517 + list_add_tail(&conn->link,&peer->conn_active);
13518 + atomic_inc(&peer->conn_count);
13519 + write_unlock(&peer->conn_lock);
13521 + down_write(&rxrpc_conns_sem);
13522 + list_add_tail(&conn->proc_link,&rxrpc_conns);
13523 + up_write(&rxrpc_conns_sem);
13526 + _leave(" = 0 (%p)",conn);
13529 +} /* end rxrpc_create_connection() */
13531 +/*****************************************************************************/
13533 + * lookup the connection for an incoming packet
13534 + * - create a new connection record for unrecorded incoming connections
13536 +int rxrpc_connection_lookup(struct rxrpc_peer *peer,
13537 + struct rxrpc_message *msg,
13538 + struct rxrpc_connection **_conn)
13540 + struct rxrpc_connection *conn, *candidate = NULL;
13541 + struct list_head *_p;
13542 + int ret, fresh = 0;
13543 + u32 x_epoch, x_connid;
13544 + u16 x_port, x_secix, x_servid;
13547 + _enter("%p{{%hu}},%u,%hu",
13548 + peer,peer->trans->port,ntohs(msg->pkt->h.uh->source),ntohs(msg->hdr.serviceId));
13550 + x_port = msg->pkt->h.uh->source;
13551 + x_epoch = msg->hdr.epoch;
13552 + x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED;
13553 + x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK);
13554 + x_servid = msg->hdr.serviceId;
13555 + x_secix = msg->hdr.securityIndex;
13557 + /* [common case] search the transport's active list first */
13558 + read_lock(&peer->conn_lock);
13559 + list_for_each(_p,&peer->conn_active) {
13560 + conn = list_entry(_p,struct rxrpc_connection,link);
13561 + if (conn->addr.sin_port == x_port &&
13562 + conn->in_epoch == x_epoch &&
13563 + conn->conn_id == x_connid &&
13564 + conn->security_ix == x_secix &&
13565 + conn->service_id == x_servid &&
13566 + conn->in_clientflag == x_clflag)
13567 + goto found_active;
13569 + read_unlock(&peer->conn_lock);
13571 + /* [uncommon case] not active
13572 + * - create a candidate for a new record if an inbound connection
13573 + * - only examine the graveyard for an outbound connection
13576 + ret = __rxrpc_create_connection(peer,&candidate);
13578 + _leave(" = %d",ret);
13582 + /* fill in the specifics */
13583 + candidate->addr.sin_family = AF_INET;
13584 + candidate->addr.sin_port = x_port;
13585 + candidate->addr.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
13586 + candidate->in_epoch = x_epoch;
13587 + candidate->out_epoch = x_epoch;
13588 + candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
13589 + candidate->out_clientflag = 0;
13590 + candidate->conn_id = x_connid;
13591 + candidate->service_id = x_servid;
13592 + candidate->security_ix = x_secix;
13595 + /* search the active list again, just in case it appeared whilst we were busy */
13596 + write_lock(&peer->conn_lock);
13597 + list_for_each(_p,&peer->conn_active) {
13598 + conn = list_entry(_p,struct rxrpc_connection,link);
13599 + if (conn->addr.sin_port == x_port &&
13600 + conn->in_epoch == x_epoch &&
13601 + conn->conn_id == x_connid &&
13602 + conn->security_ix == x_secix &&
13603 + conn->service_id == x_servid &&
13604 + conn->in_clientflag == x_clflag)
13605 + goto found_active_second_chance;
13608 + /* search the transport's graveyard list */
13609 + spin_lock(&peer->conn_gylock);
13610 + list_for_each(_p,&peer->conn_graveyard) {
13611 + conn = list_entry(_p,struct rxrpc_connection,link);
13612 + if (conn->addr.sin_port == x_port &&
13613 + conn->in_epoch == x_epoch &&
13614 + conn->conn_id == x_connid &&
13615 + conn->security_ix == x_secix &&
13616 + conn->service_id == x_servid &&
13617 + conn->in_clientflag == x_clflag)
13618 + goto found_in_graveyard;
13620 + spin_unlock(&peer->conn_gylock);
13622 + /* outbound connections aren't created here */
13624 + write_unlock(&peer->conn_lock);
13625 + _leave(" = -ENOENT");
13629 + /* we can now add the new candidate to the list */
13630 + rxrpc_get_peer(peer);
13631 + conn = candidate;
13632 + candidate = NULL;
13633 + atomic_inc(&peer->conn_count);
13637 + list_add_tail(&conn->link,&peer->conn_active);
13640 + write_unlock(&peer->conn_lock);
13643 + __RXACCT(atomic_dec(&rxrpc_connection_count));
13644 + kfree(candidate);
13648 + down_write(&rxrpc_conns_sem);
13649 + list_add_tail(&conn->proc_link,&rxrpc_conns);
13650 + up_write(&rxrpc_conns_sem);
13655 + _leave(" = 0 (%p)",conn);
13658 + /* handle the connection being found in the active list straight off */
13660 + rxrpc_get_connection(conn);
13661 + read_unlock(&peer->conn_lock);
13664 + /* handle resurrecting a connection from the graveyard */
13665 + found_in_graveyard:
13666 + rxrpc_get_peer(peer);
13667 + rxrpc_get_connection(conn);
13668 + rxrpc_krxtimod_del_timer(&conn->timeout);
13669 + list_del_init(&conn->link);
13670 + spin_unlock(&peer->conn_gylock);
13671 + goto make_active;
13673 + /* handle finding the connection on the second time through the active list */
13674 + found_active_second_chance:
13675 + rxrpc_get_connection(conn);
13676 + goto success_uwfree;
13678 +} /* end rxrpc_connection_lookup() */
13680 +/*****************************************************************************/
13682 + * finish using a connection record
13683 + * - it will be transferred to the peer's connection graveyard when refcount reaches 0
13685 +void rxrpc_put_connection(struct rxrpc_connection *conn)
13687 + struct rxrpc_peer *peer = conn->peer;
13689 + _enter("%p{u=%d p=%hu}",conn,atomic_read(&conn->usage),ntohs(conn->addr.sin_port));
13691 + /* sanity check */
13692 + if (atomic_read(&conn->usage)<=0)
13695 + spin_lock(&peer->conn_gylock);
13696 + if (likely(!atomic_dec_and_test(&conn->usage))) {
13697 + spin_unlock(&peer->conn_gylock);
13702 + /* move to graveyard queue */
13703 + list_del(&conn->link);
13704 + list_add_tail(&conn->link,&peer->conn_graveyard);
13706 + /* discard in 20 secs (20*HZ timer below) */
13707 + rxrpc_krxtimod_add_timer(&conn->timeout,20*HZ);
13709 + spin_unlock(&peer->conn_gylock);
13711 + rxrpc_put_peer(conn->peer);
13713 + _leave(" [killed]");
13714 +} /* end rxrpc_put_connection() */
13716 +/*****************************************************************************/
13718 + * free a connection record
13720 +void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
13722 + struct rxrpc_peer *peer;
13724 + _enter("%p{u=%d p=%hu}",conn,atomic_read(&conn->usage),ntohs(conn->addr.sin_port));
13726 + peer = conn->peer;
13728 + if (atomic_read(&conn->usage)<0)
13731 + /* remove from graveyard if still dead */
13732 + spin_lock(&peer->conn_gylock);
13733 + if (atomic_read(&conn->usage)==0) {
13734 + list_del_init(&conn->link);
13739 + spin_unlock(&peer->conn_gylock);
13743 + return; /* resurrected */
13746 + _debug("--- Destroying Connection %p ---",conn);
13748 + down_write(&rxrpc_conns_sem);
13749 + list_del(&conn->proc_link);
13750 + up_write(&rxrpc_conns_sem);
13752 + __RXACCT(atomic_dec(&rxrpc_connection_count));
13755 + /* if the graveyard is now empty, wake up anyone waiting for that */
13756 + if (atomic_dec_and_test(&peer->conn_count))
13757 + wake_up(&peer->conn_gy_waitq);
13759 + _leave(" [destroyed]");
13760 +} /* end rxrpc_conn_do_timeout() */
13762 +/*****************************************************************************/
13764 + * clear all connection records from a peer endpoint
13766 +void rxrpc_conn_clearall(struct rxrpc_peer *peer)
13768 + DECLARE_WAITQUEUE(myself,current);
13770 + struct rxrpc_connection *conn;
13773 + _enter("%p",peer);
13775 + /* there shouldn't be any active conns remaining */
13776 + if (!list_empty(&peer->conn_active))
13779 + /* manually timeout all conns in the graveyard */
13780 + spin_lock(&peer->conn_gylock);
13781 + while (!list_empty(&peer->conn_graveyard)) {
13782 + conn = list_entry(peer->conn_graveyard.next,struct rxrpc_connection,link);
13783 + err = rxrpc_krxtimod_del_timer(&conn->timeout);
13784 + spin_unlock(&peer->conn_gylock);
13787 + rxrpc_conn_do_timeout(conn);
13789 + spin_lock(&peer->conn_gylock);
13791 + spin_unlock(&peer->conn_gylock);
13793 + /* wait for the conn graveyard to be completely cleared */
13794 + set_current_state(TASK_UNINTERRUPTIBLE);
13795 + add_wait_queue(&peer->conn_gy_waitq,&myself);
13797 + while (atomic_read(&peer->conn_count)!=0) {
13799 + set_current_state(TASK_UNINTERRUPTIBLE);
13802 + remove_wait_queue(&peer->conn_gy_waitq,&myself);
13803 + set_current_state(TASK_RUNNING);
13807 +} /* end rxrpc_conn_clearall() */
13809 +/*****************************************************************************/
13811 + * allocate and prepare a message for sending out through the transport endpoint
13813 +int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
13814 + struct rxrpc_call *call,
13817 + struct iovec diov[],
13819 + struct rxrpc_message **_msg)
13821 + struct rxrpc_message *msg;
13824 + _enter("%p{%d},%p,%u",conn,ntohs(conn->addr.sin_port),call,type);
13827 + _leave(" = -EINVAL");
13831 + msg = kmalloc(sizeof(struct rxrpc_message),alloc_flags);
13833 + _leave(" = -ENOMEM");
13837 + memset(msg,0,sizeof(*msg));
13838 + atomic_set(&msg->usage,1);
13840 + INIT_LIST_HEAD(&msg->link);
13842 + msg->state = RXRPC_MSG_PREPARED;
13844 + msg->hdr.epoch = conn->out_epoch;
13845 + msg->hdr.cid = conn->conn_id | (call ? call->chan_ix : 0);
13846 + msg->hdr.callNumber = call ? call->call_id : 0;
13847 + msg->hdr.type = type;
13848 + msg->hdr.flags = conn->out_clientflag;
13849 + msg->hdr.securityIndex = conn->security_ix;
13850 + msg->hdr.serviceId = conn->service_id;
13852 + /* generate sequence numbers for data packets */
13855 + case RXRPC_PACKET_TYPE_DATA:
13856 + msg->seq = ++call->snd_seq_count;
13857 + msg->hdr.seq = htonl(msg->seq);
13859 + case RXRPC_PACKET_TYPE_ACK:
13860 + /* ACK sequence numbers are complicated. The following may be wrong:
13861 + * - jumbo packet ACKs should have a seq number
13862 + * - normal ACKs should not
13869 + msg->dcount = dcount + 1;
13870 + msg->dsize = sizeof(msg->hdr);
13871 + msg->data[0].iov_len = sizeof(msg->hdr);
13872 + msg->data[0].iov_base = &msg->hdr;
13874 + for (loop=0; loop<dcount; loop++) {
13875 + msg->dsize += diov[loop].iov_len;
13876 + msg->data[loop+1].iov_len = diov[loop].iov_len;
13877 + msg->data[loop+1].iov_base = diov[loop].iov_base;
13880 + __RXACCT(atomic_inc(&rxrpc_message_count));
13882 + _leave(" = 0 (%p) #%d",msg,atomic_read(&rxrpc_message_count));
13884 +} /* end rxrpc_conn_newmsg() */
13886 +/*****************************************************************************/
13890 +void __rxrpc_put_message(struct rxrpc_message *msg)
13894 + _enter("%p #%d",msg,atomic_read(&rxrpc_message_count));
13896 + if (msg->pkt) kfree_skb(msg->pkt);
13897 + if (msg->conn) rxrpc_put_connection(msg->conn);
13899 + for (loop=0; loop<8; loop++)
13900 + if (test_bit(loop,&msg->dfree))
13901 + kfree(msg->data[loop].iov_base);
13903 + __RXACCT(atomic_dec(&rxrpc_message_count));
13907 +} /* end __rxrpc_put_message() */
13909 +/*****************************************************************************/
13911 + * send a message out through the transport endpoint
13913 +int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg)
13915 + struct msghdr msghdr;
13916 + mm_segment_t oldfs;
13919 + _enter("%p{%d}",conn,ntohs(conn->addr.sin_port));
13921 + /* fill in some fields in the header */
13922 + spin_lock(&conn->lock);
13923 + msg->hdr.serial = htonl(++conn->serial_counter);
13924 + msg->rttdone = 0;
13925 + spin_unlock(&conn->lock);
13927 + /* set up the message to be transmitted */
13928 + msghdr.msg_name = &conn->addr;
13929 + msghdr.msg_namelen = sizeof(conn->addr);
13930 + msghdr.msg_iov = msg->data;
13931 + msghdr.msg_iovlen = msg->dcount;
13932 + msghdr.msg_control = NULL;
13933 + msghdr.msg_controllen = 0;
13934 + msghdr.msg_flags = MSG_CONFIRM|MSG_DONTWAIT;
13936 + _net("Sending message type %d of %d bytes to %08x:%d",
13939 + htonl(conn->addr.sin_addr.s_addr),
13940 + htons(conn->addr.sin_port));
13942 + /* send the message */
13943 + oldfs = get_fs();
13944 + set_fs(KERNEL_DS);
13945 + ret = sock_sendmsg(conn->trans->socket,&msghdr,msg->dsize);
13949 + msg->state = RXRPC_MSG_ERROR;
13952 + msg->state = RXRPC_MSG_SENT;
13955 + spin_lock(&conn->lock);
13956 + msg->stamp = conn->atime = xtime;
13957 + spin_unlock(&conn->lock);
13960 + _leave(" = %d",ret);
13963 +} /* end rxrpc_conn_sendmsg() */
13965 +/*****************************************************************************/
13967 + * deal with a subsequent call packet
13969 +int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
13970 + struct rxrpc_call *call,
13971 + struct rxrpc_message *msg)
13973 + struct rxrpc_message *pmsg;
13974 + struct list_head *_p;
13975 + unsigned cix, seq;
13978 + _enter("%p,%p,%p",conn,call,msg);
13981 + cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
13983 + spin_lock(&conn->lock);
13984 + call = conn->channels[cix];
13986 + if (!call || call->call_id != msg->hdr.callNumber) {
13987 + spin_unlock(&conn->lock);
13988 + rxrpc_trans_immediate_abort(conn->trans,msg,-ENOENT);
13992 + rxrpc_get_call(call);
13993 + spin_unlock(&conn->lock);
13997 + rxrpc_get_call(call);
14000 + _proto("Received packet %%%u [%u] on call %hu:%u:%u",
14001 + htonl(msg->hdr.serial),
14002 + htonl(msg->hdr.seq),
14003 + htons(msg->hdr.serviceId),
14004 + htonl(conn->conn_id),
14005 + htonl(call->call_id));
14007 + call->pkt_rcv_count++;
14009 + if (msg->pkt->dst && msg->pkt->dst->dev)
14010 + conn->peer->if_mtu = msg->pkt->dst->dev->mtu - msg->pkt->dst->dev->hard_header_len;
14012 + /* queue on the call in seq order */
14013 + rxrpc_get_message(msg);
14016 + spin_lock(&call->lock);
14017 + list_for_each(_p,&call->rcv_receiveq) {
14018 + pmsg = list_entry(_p,struct rxrpc_message,link);
14019 + if (pmsg->seq>seq)
14022 + list_add_tail(&msg->link,_p);
14024 + /* reset the activity timeout */
14025 + call->flags |= RXRPC_CALL_RCV_PKT;
14026 + mod_timer(&call->rcv_timeout,jiffies + rxrpc_call_rcv_timeout * HZ);
14028 + spin_unlock(&call->lock);
14030 + rxrpc_krxiod_queue_call(call);
14032 + rxrpc_put_call(call);
14034 + _leave(" = %d",ret);
14037 +} /* end rxrpc_conn_receive_call_packet() */
14039 +/*****************************************************************************/
14041 + * handle an ICMP error being applied to a connection
14043 +void rxrpc_conn_handle_error(struct rxrpc_connection *conn, int local, int errno)
14045 + struct rxrpc_call *calls[4];
14048 + _enter("%p{%d},%d",conn,ntohs(conn->addr.sin_port),errno);
14050 + /* get a ref to all my calls in one go */
14051 + memset(calls,0,sizeof(calls));
14052 + spin_lock(&conn->lock);
14054 + for (loop=3; loop>=0; loop--) {
14055 + if (conn->channels[loop]) {
14056 + calls[loop] = conn->channels[loop];
14057 + rxrpc_get_call(calls[loop]);
14061 + spin_unlock(&conn->lock);
14063 + /* now kick them all */
14064 + for (loop=3; loop>=0; loop--) {
14065 + if (calls[loop]) {
14066 + rxrpc_call_handle_error(calls[loop],local,errno);
14067 + rxrpc_put_call(calls[loop]);
14072 +} /* end rxrpc_conn_handle_error() */
14073 diff -urNp linux-5240/net/rxrpc/internal.h linux-5250/net/rxrpc/internal.h
14074 --- linux-5240/net/rxrpc/internal.h 1970-01-01 01:00:00.000000000 +0100
14075 +++ linux-5250/net/rxrpc/internal.h
14077 +/* internal.h: internal Rx RPC stuff
14079 + * Copyright (c) 2002 David Howells (dhowells@redhat.com).
14082 +#ifndef RXRPC_INTERNAL_H
14083 +#define RXRPC_INTERNAL_H
14085 +#include <linux/compiler.h>
14086 +#include <linux/kernel.h>
14089 + * debug accounting
14092 +#define __RXACCT_DECL(X) X
14093 +#define __RXACCT(X) do { X; } while(0)
14095 +#define __RXACCT_DECL(X)
14096 +#define __RXACCT(X) do { } while(0)
14099 +__RXACCT_DECL(extern atomic_t rxrpc_transport_count);
14100 +__RXACCT_DECL(extern atomic_t rxrpc_peer_count);
14101 +__RXACCT_DECL(extern atomic_t rxrpc_connection_count);
14102 +__RXACCT_DECL(extern atomic_t rxrpc_call_count);
14103 +__RXACCT_DECL(extern atomic_t rxrpc_message_count);
14108 +#define kenter(FMT,...) printk("==> %s("FMT")\n",__FUNCTION__,##__VA_ARGS__)
14109 +#define kleave(FMT,...) printk("<== %s()"FMT"\n",__FUNCTION__,##__VA_ARGS__)
14110 +#define kdebug(FMT,...) printk(" "FMT"\n",##__VA_ARGS__)
14111 +#define kproto(FMT,...) printk("### "FMT"\n",##__VA_ARGS__)
14112 +#define knet(FMT,...) printk(" "FMT"\n",##__VA_ARGS__)
14115 +#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
14116 +#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
14117 +#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
14118 +#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__)
14119 +#define _net(FMT,...) knet(FMT,##__VA_ARGS__)
14121 +#define _enter(FMT,...) do { if (rxrpc_ktrace) kenter(FMT,##__VA_ARGS__); } while(0)
14122 +#define _leave(FMT,...) do { if (rxrpc_ktrace) kleave(FMT,##__VA_ARGS__); } while(0)
14123 +#define _debug(FMT,...) do { if (rxrpc_kdebug) kdebug(FMT,##__VA_ARGS__); } while(0)
14124 +#define _proto(FMT,...) do { if (rxrpc_kproto) kproto(FMT,##__VA_ARGS__); } while(0)
14125 +#define _net(FMT,...) do { if (rxrpc_knet) knet (FMT,##__VA_ARGS__); } while(0)
14131 +extern struct list_head rxrpc_calls;
14132 +extern struct rw_semaphore rxrpc_calls_sem;
14137 +extern struct list_head rxrpc_conns;
14138 +extern struct rw_semaphore rxrpc_conns_sem;
14140 +extern void rxrpc_conn_do_timeout(struct rxrpc_connection *conn);
14141 +extern void rxrpc_conn_clearall(struct rxrpc_peer *peer);
14146 +extern struct list_head rxrpc_peers;
14147 +extern struct rw_semaphore rxrpc_peers_sem;
14149 +extern void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
14150 + struct rxrpc_message *msg,
14151 + struct rxrpc_message *resp);
14153 +extern void rxrpc_peer_clearall(struct rxrpc_transport *trans);
14155 +extern void rxrpc_peer_do_timeout(struct rxrpc_peer *peer);
14161 +#ifdef CONFIG_PROC_FS
14162 +extern int rxrpc_proc_init(void);
14163 +extern void rxrpc_proc_cleanup(void);
14169 +extern struct list_head rxrpc_proc_transports;
14170 +extern struct rw_semaphore rxrpc_proc_transports_sem;
14172 +#endif /* RXRPC_INTERNAL_H */
14173 diff -urNp linux-5240/net/rxrpc/krxiod.c linux-5250/net/rxrpc/krxiod.c
14174 --- linux-5240/net/rxrpc/krxiod.c 1970-01-01 01:00:00.000000000 +0100
14175 +++ linux-5250/net/rxrpc/krxiod.c
14177 +/* krxiod.c: Rx I/O daemon
14179 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
14180 + * Written by David Howells (dhowells@redhat.com)
14182 + * This program is free software; you can redistribute it and/or
14183 + * modify it under the terms of the GNU General Public License
14184 + * as published by the Free Software Foundation; either version
14185 + * 2 of the License, or (at your option) any later version.
14188 +#include <linux/version.h>
14189 +#include <linux/sched.h>
14190 +#include <linux/completion.h>
14191 +#include <linux/spinlock.h>
14192 +#include <linux/init.h>
14193 +#include <rxrpc/krxiod.h>
14194 +#include <rxrpc/transport.h>
14195 +#include <rxrpc/peer.h>
14196 +#include <rxrpc/call.h>
14197 +#include "internal.h"
14199 +static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);
14200 +static DECLARE_COMPLETION(rxrpc_krxiod_dead);
14202 +static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);
14204 +static LIST_HEAD(rxrpc_krxiod_transportq);
14205 +static spinlock_t rxrpc_krxiod_transportq_lock = SPIN_LOCK_UNLOCKED;
14207 +static LIST_HEAD(rxrpc_krxiod_callq);
14208 +static spinlock_t rxrpc_krxiod_callq_lock = SPIN_LOCK_UNLOCKED;
14210 +static volatile int rxrpc_krxiod_die;
14212 +/*****************************************************************************/
14216 +static int rxrpc_krxiod(void *arg)
14218 + DECLARE_WAITQUEUE(krxiod,current);
14222 + printk("Started krxiod %d\n",current->pid);
14223 + strcpy(current->comm,"krxiod");
14227 + /* only certain signals are of interest */
14228 + spin_lock_irq(&current->sigmask_lock);
14229 + siginitsetinv(&current->blocked,0);
14230 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
14231 + recalc_sigpending();
14233 + recalc_sigpending(current);
14235 + spin_unlock_irq(&current->sigmask_lock);
14237 + /* loop around waiting for work to do */
14239 + /* wait for work or to be told to exit */
14240 + _debug("### Begin Wait");
14241 + if (!atomic_read(&rxrpc_krxiod_qcount)) {
14242 + set_current_state(TASK_INTERRUPTIBLE);
14244 + add_wait_queue(&rxrpc_krxiod_sleepq,&krxiod);
14247 + set_current_state(TASK_INTERRUPTIBLE);
14248 + if (atomic_read(&rxrpc_krxiod_qcount) ||
14249 + rxrpc_krxiod_die ||
14250 + signal_pending(current))
14256 + remove_wait_queue(&rxrpc_krxiod_sleepq,&krxiod);
14257 + set_current_state(TASK_RUNNING);
14259 + _debug("### End Wait");
14261 + /* do work if been given some to do */
14262 + _debug("### Begin Work");
14264 + /* see if there's a transport in need of attention */
14265 + if (!list_empty(&rxrpc_krxiod_transportq)) {
14266 + struct rxrpc_transport *trans = NULL;
14268 + spin_lock_irq(&rxrpc_krxiod_transportq_lock);
14270 + if (!list_empty(&rxrpc_krxiod_transportq)) {
14271 + trans = list_entry(rxrpc_krxiod_transportq.next,
14272 + struct rxrpc_transport,krxiodq_link);
14273 + list_del_init(&trans->krxiodq_link);
14274 + atomic_dec(&rxrpc_krxiod_qcount);
14276 + /* make sure it hasn't gone away and doesn't go away */
14277 + if (atomic_read(&trans->usage)>0)
14278 + rxrpc_get_transport(trans);
14283 + spin_unlock_irq(&rxrpc_krxiod_transportq_lock);
14286 + rxrpc_trans_receive_packet(trans);
14287 + rxrpc_put_transport(trans);
14291 + /* see if there's a call in need of attention */
14292 + if (!list_empty(&rxrpc_krxiod_callq)) {
14293 + struct rxrpc_call *call = NULL;
14295 + spin_lock_irq(&rxrpc_krxiod_callq_lock);
14297 + if (!list_empty(&rxrpc_krxiod_callq)) {
14298 + call = list_entry(rxrpc_krxiod_callq.next,
14299 + struct rxrpc_call,rcv_krxiodq_lk);
14300 + list_del_init(&call->rcv_krxiodq_lk);
14301 + atomic_dec(&rxrpc_krxiod_qcount);
14303 + /* make sure it hasn't gone away and doesn't go away */
14304 + if (atomic_read(&call->usage)>0) {
14305 + _debug("@@@ KRXIOD Begin Attend Call %p",call);
14306 + rxrpc_get_call(call);
14313 + spin_unlock_irq(&rxrpc_krxiod_callq_lock);
14316 + rxrpc_call_do_stuff(call);
14317 + rxrpc_put_call(call);
14318 + _debug("@@@ KRXIOD End Attend Call %p",call);
14322 + _debug("### End Work");
14324 + /* discard pending signals */
14325 + while (signal_pending(current)) {
14326 + spin_lock_irq(&current->sigmask_lock);
14327 + dequeue_signal(&current->blocked,&sinfo);
14328 + spin_unlock_irq(&current->sigmask_lock);
14331 + } while (!rxrpc_krxiod_die);
14333 + /* and that's all */
14334 + complete_and_exit(&rxrpc_krxiod_dead,0);
14336 +} /* end rxrpc_krxiod() */
14338 +/*****************************************************************************/
14340 + * start up a krxiod daemon
14342 +int __init rxrpc_krxiod_init(void)
14344 + return kernel_thread(rxrpc_krxiod,NULL,0);
14346 +} /* end rxrpc_krxiod_init() */
14348 +/*****************************************************************************/
14350 + * kill the krxiod daemon and wait for it to complete
14352 +void rxrpc_krxiod_kill(void)
14354 + rxrpc_krxiod_die = 1;
14355 + wake_up_all(&rxrpc_krxiod_sleepq);
14356 + wait_for_completion(&rxrpc_krxiod_dead);
14358 +} /* end rxrpc_krxiod_kill() */
14360 +/*****************************************************************************/
14362 + * queue a transport for attention by krxiod
14364 +void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
14366 + unsigned long flags;
14370 + if (list_empty(&trans->krxiodq_link)) {
14371 + spin_lock_irqsave(&rxrpc_krxiod_transportq_lock,flags);
14373 + if (list_empty(&trans->krxiodq_link)) {
14374 + if (atomic_read(&trans->usage)>0) {
14375 + list_add_tail(&trans->krxiodq_link,&rxrpc_krxiod_transportq);
14376 + atomic_inc(&rxrpc_krxiod_qcount);
14380 + spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock,flags);
14381 + wake_up_all(&rxrpc_krxiod_sleepq);
14386 +} /* end rxrpc_krxiod_queue_transport() */
14388 +/*****************************************************************************/
14390 + * dequeue a transport from krxiod's attention queue
14392 +void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
14394 + unsigned long flags;
14398 + spin_lock_irqsave(&rxrpc_krxiod_transportq_lock,flags);
14399 + if (!list_empty(&trans->krxiodq_link)) {
14400 + list_del_init(&trans->krxiodq_link);
14401 + atomic_dec(&rxrpc_krxiod_qcount);
14403 + spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock,flags);
14407 +} /* end rxrpc_krxiod_dequeue_transport() */
14409 +/*****************************************************************************/
14411 + * queue a call for attention by krxiod
14413 +void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
14415 + unsigned long flags;
14417 + if (list_empty(&call->rcv_krxiodq_lk)) {
14418 + spin_lock_irqsave(&rxrpc_krxiod_callq_lock,flags);
14419 + if (atomic_read(&call->usage)>0) {
14420 + list_add_tail(&call->rcv_krxiodq_lk,&rxrpc_krxiod_callq);
14421 + atomic_inc(&rxrpc_krxiod_qcount);
14423 + spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock,flags);
14425 + wake_up_all(&rxrpc_krxiod_sleepq);
14427 +} /* end rxrpc_krxiod_queue_call() */
14429 +/*****************************************************************************/
14431 + * dequeue a call from krxiod's attention queue
14433 +void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
14435 + unsigned long flags;
14437 + spin_lock_irqsave(&rxrpc_krxiod_callq_lock,flags);
14438 + if (!list_empty(&call->rcv_krxiodq_lk)) {
14439 + list_del_init(&call->rcv_krxiodq_lk);
14440 + atomic_dec(&rxrpc_krxiod_qcount);
14442 + spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock,flags);
14444 +} /* end rxrpc_krxiod_dequeue_call() */
14445 diff -urNp linux-5240/net/rxrpc/krxsecd.c linux-5250/net/rxrpc/krxsecd.c
14446 --- linux-5240/net/rxrpc/krxsecd.c 1970-01-01 01:00:00.000000000 +0100
14447 +++ linux-5250/net/rxrpc/krxsecd.c
14449 +/* krxsecd.c: Rx security daemon
14451 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
14452 + * Written by David Howells (dhowells@redhat.com)
14454 + * This program is free software; you can redistribute it and/or
14455 + * modify it under the terms of the GNU General Public License
14456 + * as published by the Free Software Foundation; either version
14457 + * 2 of the License, or (at your option) any later version.
14459 + * This daemon deals with:
14460 + * - consulting the application as to whether inbound peers and calls should be authorised
14461 + * - generating security challenges for inbound connections
14462 + * - responding to security challenges on outbound connections
14465 +#include <linux/version.h>
14466 +#include <linux/module.h>
14467 +#include <linux/sched.h>
14468 +#include <linux/completion.h>
14469 +#include <linux/spinlock.h>
14470 +#include <linux/init.h>
14471 +#include <rxrpc/krxsecd.h>
14472 +#include <rxrpc/transport.h>
14473 +#include <rxrpc/connection.h>
14474 +#include <rxrpc/message.h>
14475 +#include <rxrpc/peer.h>
14476 +#include <rxrpc/call.h>
14477 +#include <linux/udp.h>
14478 +#include <linux/ip.h>
14479 +#include <net/sock.h>
14480 +#include "internal.h"
14482 +static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxsecd_sleepq);
14483 +static DECLARE_COMPLETION(rxrpc_krxsecd_dead);
14484 +static volatile int rxrpc_krxsecd_die;
14486 +static atomic_t rxrpc_krxsecd_qcount;
14488 +/* queue of unprocessed inbound messages with seqno #1 and RXRPC_CLIENT_INITIATED flag set */
14489 +static LIST_HEAD(rxrpc_krxsecd_initmsgq);
14490 +static spinlock_t rxrpc_krxsecd_initmsgq_lock = SPIN_LOCK_UNLOCKED;
14492 +static void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg);
14494 +/*****************************************************************************/
14496 + * Rx security daemon
14498 +static int rxrpc_krxsecd(void *arg)
14500 + DECLARE_WAITQUEUE(krxsecd,current);
14505 + printk("Started krxsecd %d\n",current->pid);
14506 + strcpy(current->comm,"krxsecd");
14510 + /* only certain signals are of interest */
14511 + spin_lock_irq(&current->sigmask_lock);
14512 + siginitsetinv(&current->blocked,0);
14513 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
14514 + recalc_sigpending();
14516 + recalc_sigpending(current);
14518 + spin_unlock_irq(&current->sigmask_lock);
14520 + /* loop around waiting for work to do */
14522 + /* wait for work or to be told to exit */
14523 + _debug("### Begin Wait");
14524 + if (!atomic_read(&rxrpc_krxsecd_qcount)) {
14525 + set_current_state(TASK_INTERRUPTIBLE);
14527 + add_wait_queue(&rxrpc_krxsecd_sleepq,&krxsecd);
14530 + set_current_state(TASK_INTERRUPTIBLE);
14531 + if (atomic_read(&rxrpc_krxsecd_qcount) ||
14532 + rxrpc_krxsecd_die ||
14533 + signal_pending(current))
14539 + remove_wait_queue(&rxrpc_krxsecd_sleepq,&krxsecd);
14540 + set_current_state(TASK_RUNNING);
14542 + die = rxrpc_krxsecd_die;
14543 + _debug("### End Wait");
14545 + /* see if there're incoming calls in need of authenticating */
14546 + _debug("### Begin Inbound Calls");
14548 + if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
14549 + struct rxrpc_message *msg = NULL;
14551 + spin_lock(&rxrpc_krxsecd_initmsgq_lock);
14553 + if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
14554 + msg = list_entry(rxrpc_krxsecd_initmsgq.next,
14555 + struct rxrpc_message,link);
14556 + list_del_init(&msg->link);
14557 + atomic_dec(&rxrpc_krxsecd_qcount);
14560 + spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
14563 + rxrpc_krxsecd_process_incoming_call(msg);
14564 + rxrpc_put_message(msg);
14568 + _debug("### End Inbound Calls");
14570 + /* discard pending signals */
14571 + while (signal_pending(current)) {
14572 + spin_lock_irq(&current->sigmask_lock);
14573 + dequeue_signal(&current->blocked,&sinfo);
14574 + spin_unlock_irq(&current->sigmask_lock);
14579 + /* and that's all */
14580 + complete_and_exit(&rxrpc_krxsecd_dead,0);
14582 +} /* end rxrpc_krxsecd() */
14584 +/*****************************************************************************/
14586 + * start up a krxsecd daemon
14588 +int __init rxrpc_krxsecd_init(void)
14590 + return kernel_thread(rxrpc_krxsecd,NULL,0);
14592 +} /* end rxrpc_krxsecd_init() */
14594 +/*****************************************************************************/
14596 + * kill the krxsecd daemon and wait for it to complete
14598 +void rxrpc_krxsecd_kill(void)
14600 + rxrpc_krxsecd_die = 1;
14601 + wake_up_all(&rxrpc_krxsecd_sleepq);
14602 + wait_for_completion(&rxrpc_krxsecd_dead);
14604 +} /* end rxrpc_krxsecd_kill() */
14606 +/*****************************************************************************/
14608 + * clear all pending incoming calls for the specified transport
14610 +void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
14614 + struct rxrpc_message *msg;
14615 + struct list_head *_p, *_n;
14617 + _enter("%p",trans);
14619 + /* move all the messages for this transport onto a temp list */
14620 + spin_lock(&rxrpc_krxsecd_initmsgq_lock);
14622 + list_for_each_safe(_p,_n,&rxrpc_krxsecd_initmsgq) {
14623 + msg = list_entry(_p,struct rxrpc_message,link);
14624 + if (msg->trans==trans) {
14625 + list_del(&msg->link);
14626 + list_add_tail(&msg->link,&tmp);
14627 + atomic_dec(&rxrpc_krxsecd_qcount);
14631 + spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
14633 + /* zap all messages on the temp list */
14634 + while (!list_empty(&tmp)) {
14635 + msg = list_entry(tmp.next,struct rxrpc_message,link);
14636 + list_del_init(&msg->link);
14637 + rxrpc_put_message(msg);
14641 +} /* end rxrpc_krxsecd_clear_transport() */
14643 +/*****************************************************************************/
14645 + * queue a message on the incoming calls list
14647 +void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg)
14649 + _enter("%p",msg);
14651 + /* queue for processing by krxsecd */
14652 + spin_lock(&rxrpc_krxsecd_initmsgq_lock);
14654 + if (!rxrpc_krxsecd_die) {
14655 + rxrpc_get_message(msg);
14656 + list_add_tail(&msg->link,&rxrpc_krxsecd_initmsgq);
14657 + atomic_inc(&rxrpc_krxsecd_qcount);
14660 + spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
14662 + wake_up(&rxrpc_krxsecd_sleepq);
14665 +} /* end rxrpc_krxsecd_queue_incoming_call() */
14667 +/*****************************************************************************/
14669 + * process the initial message of an incoming call
14671 +void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg)
14673 + struct rxrpc_transport *trans = msg->trans;
14674 + struct rxrpc_service *srv;
14675 + struct rxrpc_call *call;
14676 + struct list_head *_p;
14677 + unsigned short sid;
14680 + _enter("%p{tr=%p}",msg,trans);
14682 + ret = rxrpc_incoming_call(msg->conn,msg,&call);
14686 + /* find the matching service on the transport */
14687 + sid = ntohs(msg->hdr.serviceId);
14690 + spin_lock(&trans->lock);
14691 + list_for_each(_p,&trans->services) {
14692 + srv = list_entry(_p,struct rxrpc_service,link);
14693 + if (srv->service_id==sid && try_inc_mod_count(srv->owner)) {
14694 + /* found a match (made sure it won't vanish) */
14695 + _debug("found service '%s'",srv->name);
14696 + call->owner = srv->owner;
14700 + spin_unlock(&trans->lock);
14702 + /* report the new connection
14703 + * - the func must inc the call's usage count to keep it
14706 + if (_p!=&trans->services) {
14707 + /* attempt to accept the call */
14708 + call->conn->service = srv;
14709 + call->app_attn_func = srv->attn_func;
14710 + call->app_error_func = srv->error_func;
14711 + call->app_aemap_func = srv->aemap_func;
14713 + ret = srv->new_call(call);
14715 + /* send an abort if an error occurred */
14717 + rxrpc_call_abort(call,ret);
14720 + /* formally receive and ACK the new packet */
14721 + ret = rxrpc_conn_receive_call_packet(call->conn,call,msg);
14725 + rxrpc_put_call(call);
14728 + rxrpc_trans_immediate_abort(trans,msg,ret);
14730 + _leave(" (%d)",ret);
14731 +} /* end rxrpc_krxsecd_process_incoming_call() */
14732 diff -urNp linux-5240/net/rxrpc/krxtimod.c linux-5250/net/rxrpc/krxtimod.c
14733 --- linux-5240/net/rxrpc/krxtimod.c 1970-01-01 01:00:00.000000000 +0100
14734 +++ linux-5250/net/rxrpc/krxtimod.c
14736 +/* krxtimod.c: RXRPC timeout daemon
14738 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
14739 + * Written by David Howells (dhowells@redhat.com)
14741 + * This program is free software; you can redistribute it and/or
14742 + * modify it under the terms of the GNU General Public License
14743 + * as published by the Free Software Foundation; either version
14744 + * 2 of the License, or (at your option) any later version.
14747 +#include <linux/version.h>
14748 +#include <linux/module.h>
14749 +#include <linux/init.h>
14750 +#include <linux/sched.h>
14751 +#include <linux/completion.h>
14752 +#include <rxrpc/rxrpc.h>
14753 +#include <rxrpc/krxtimod.h>
14754 +#include <asm/errno.h>
14755 +#include "internal.h"
14757 +static DECLARE_COMPLETION(krxtimod_alive);
14758 +static DECLARE_COMPLETION(krxtimod_dead);
14759 +static DECLARE_WAIT_QUEUE_HEAD(krxtimod_sleepq);
14760 +static int krxtimod_die;
14762 +static LIST_HEAD(krxtimod_list);
14763 +static spinlock_t krxtimod_lock = SPIN_LOCK_UNLOCKED;
14765 +static int krxtimod(void *arg);
14767 +/*****************************************************************************/
14769 + * start the timeout daemon
14771 +int rxrpc_krxtimod_start(void)
14775 + ret = kernel_thread(krxtimod,NULL,0);
14779 + wait_for_completion(&krxtimod_alive);
14782 +} /* end rxrpc_krxtimod_start() */
14784 +/*****************************************************************************/
14786 + * stop the timeout daemon
14788 +void rxrpc_krxtimod_kill(void)
14790 + /* get rid of my daemon */
14791 + krxtimod_die = 1;
14792 + wake_up(&krxtimod_sleepq);
14793 + wait_for_completion(&krxtimod_dead);
14795 +} /* end rxrpc_krxtimod_kill() */
14797 +/*****************************************************************************/
14799 + * timeout processing daemon
14801 +static int krxtimod(void *arg)
14803 + DECLARE_WAITQUEUE(myself,current);
14805 + rxrpc_timer_t *timer;
14807 + printk("Started krxtimod %d\n",current->pid);
14808 + strcpy(current->comm,"krxtimod");
14812 + complete(&krxtimod_alive);
14814 + /* only certain signals are of interest */
14815 + spin_lock_irq(&current->sigmask_lock);
14816 + siginitsetinv(&current->blocked,0);
14817 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
14818 + recalc_sigpending();
14820 + recalc_sigpending(current);
14822 + spin_unlock_irq(&current->sigmask_lock);
14824 + /* loop around looking for things to attend to */
14826 + set_current_state(TASK_INTERRUPTIBLE);
14827 + add_wait_queue(&krxtimod_sleepq,&myself);
14830 + unsigned long jif;
14831 + signed long timeout;
14833 + /* deal with the server being asked to die */
14834 + if (krxtimod_die) {
14835 + remove_wait_queue(&krxtimod_sleepq,&myself);
14837 + complete_and_exit(&krxtimod_dead,0);
14840 + /* discard pending signals */
14841 + while (signal_pending(current)) {
14844 + spin_lock_irq(&current->sigmask_lock);
14845 + dequeue_signal(&current->blocked,&sinfo);
14846 + spin_unlock_irq(&current->sigmask_lock);
14849 + /* work out the time to elapse before the next event */
14850 + spin_lock(&krxtimod_lock);
14851 + if (list_empty(&krxtimod_list)) {
14852 + timeout = MAX_SCHEDULE_TIMEOUT;
14855 + timer = list_entry(krxtimod_list.next,rxrpc_timer_t,link);
14856 + timeout = timer->timo_jif;
14859 + if (time_before_eq(timeout,jif))
14863 + timeout = (long)timeout - (long)jiffies;
14866 + spin_unlock(&krxtimod_lock);
14868 + schedule_timeout(timeout);
14870 + set_current_state(TASK_INTERRUPTIBLE);
14873 + /* the thing on the front of the queue needs processing
14874 + * - we come here with the lock held and timer pointing to the expired entry
14877 + remove_wait_queue(&krxtimod_sleepq,&myself);
14878 + set_current_state(TASK_RUNNING);
14880 + _debug("@@@ Begin Timeout of %p",timer);
14882 + /* dequeue the timer */
14883 + list_del_init(&timer->link);
14884 + spin_unlock(&krxtimod_lock);
14886 + /* call the timeout function */
14887 + timer->ops->timed_out(timer);
14889 + _debug("@@@ End Timeout");
14892 +} /* end krxtimod() */
14894 +/*****************************************************************************/
14896 + * (re-)queue a timer
14898 +void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout)
14900 + struct list_head *_p;
14901 + rxrpc_timer_t *ptimer;
14903 + _enter("%p,%lu",timer,timeout);
14905 + spin_lock(&krxtimod_lock);
14907 + list_del(&timer->link);
14909 + /* the timer was deferred or reset - put it back in the queue at the right place */
14910 + timer->timo_jif = jiffies + timeout;
14912 + list_for_each(_p,&krxtimod_list) {
14913 + ptimer = list_entry(_p,rxrpc_timer_t,link);
14914 + if (time_before(timer->timo_jif,ptimer->timo_jif))
14918 + list_add_tail(&timer->link,_p); /* insert before stopping point */
14920 + spin_unlock(&krxtimod_lock);
14922 + wake_up(&krxtimod_sleepq);
14925 +} /* end rxrpc_krxtimod_add_timer() */
14927 +/*****************************************************************************/
14929 + * dequeue a timer
14930 + * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued
14932 +int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer)
14936 + _enter("%p",timer);
14938 + spin_lock(&krxtimod_lock);
14940 + if (list_empty(&timer->link))
14943 + list_del_init(&timer->link);
14945 + spin_unlock(&krxtimod_lock);
14947 + wake_up(&krxtimod_sleepq);
14949 + _leave(" = %d",ret);
14951 +} /* end rxrpc_krxtimod_del_timer() */
14952 diff -urNp linux-5240/net/rxrpc/main.c linux-5250/net/rxrpc/main.c
14953 --- linux-5240/net/rxrpc/main.c 1970-01-01 01:00:00.000000000 +0100
14954 +++ linux-5250/net/rxrpc/main.c
14956 +/* main.c: Rx RPC interface
14958 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
14959 + * Written by David Howells (dhowells@redhat.com)
14961 + * This program is free software; you can redistribute it and/or
14962 + * modify it under the terms of the GNU General Public License
14963 + * as published by the Free Software Foundation; either version
14964 + * 2 of the License, or (at your option) any later version.
14967 +#include <linux/module.h>
14968 +#include <linux/init.h>
14969 +#include <linux/sched.h>
14970 +#include <rxrpc/rxrpc.h>
14971 +#include <rxrpc/krxiod.h>
14972 +#include <rxrpc/krxsecd.h>
14973 +#include <rxrpc/krxtimod.h>
14974 +#include <rxrpc/transport.h>
14975 +#include <rxrpc/connection.h>
14976 +#include <rxrpc/call.h>
14977 +#include <rxrpc/message.h>
14978 +#include "internal.h"
14980 +static int rxrpc_initialise(void);
14981 +static void rxrpc_cleanup(void);
14983 +module_init(rxrpc_initialise);
14984 +module_exit(rxrpc_cleanup);
14986 +MODULE_DESCRIPTION("Rx RPC implementation");
14987 +MODULE_AUTHOR("Red Hat, Inc.");
14988 +MODULE_LICENSE("GPL");
14992 +/*****************************************************************************/
14994 + * initialise the Rx module
14996 +static int rxrpc_initialise(void)
15000 + /* my epoch value */
15001 + rxrpc_epoch = htonl(xtime.tv_sec);
15003 + /* register the /proc interface */
15004 +#ifdef CONFIG_PROC_FS
15005 + ret = rxrpc_proc_init();
15010 + /* register the sysctl files */
15011 +#ifdef CONFIG_SYSCTL
15012 + ret = rxrpc_sysctl_init();
15017 + /* start the krxtimod daemon */
15018 + ret = rxrpc_krxtimod_start();
15020 + goto error_sysctl;
15022 + /* start the krxiod daemon */
15023 + ret = rxrpc_krxiod_init();
15025 + goto error_krxtimod;
15027 + /* start the krxsecd daemon */
15028 + ret = rxrpc_krxsecd_init();
15030 + goto error_krxiod;
15037 + rxrpc_krxiod_kill();
15039 + rxrpc_krxtimod_kill();
15041 +#ifdef CONFIG_SYSCTL
15042 + rxrpc_sysctl_cleanup();
15045 +#ifdef CONFIG_PROC_FS
15046 + rxrpc_proc_cleanup();
15049 +} /* end rxrpc_initialise() */
15051 +/*****************************************************************************/
15053 + * clean up the Rx module
15055 +static void rxrpc_cleanup(void)
15059 + __RXACCT(printk("Outstanding Messages : %d\n",atomic_read(&rxrpc_message_count)));
15060 + __RXACCT(printk("Outstanding Calls : %d\n",atomic_read(&rxrpc_call_count)));
15061 + __RXACCT(printk("Outstanding Connections: %d\n",atomic_read(&rxrpc_connection_count)));
15062 + __RXACCT(printk("Outstanding Peers : %d\n",atomic_read(&rxrpc_peer_count)));
15063 + __RXACCT(printk("Outstanding Transports : %d\n",atomic_read(&rxrpc_transport_count)));
15065 + rxrpc_krxsecd_kill();
15066 + rxrpc_krxiod_kill();
15067 + rxrpc_krxtimod_kill();
15068 +#ifdef CONFIG_SYSCTL
15069 + rxrpc_sysctl_cleanup();
15071 +#ifdef CONFIG_PROC_FS
15072 + rxrpc_proc_cleanup();
15075 + __RXACCT(printk("Outstanding Messages : %d\n",atomic_read(&rxrpc_message_count)));
15076 + __RXACCT(printk("Outstanding Calls : %d\n",atomic_read(&rxrpc_call_count)));
15077 + __RXACCT(printk("Outstanding Connections: %d\n",atomic_read(&rxrpc_connection_count)));
15078 + __RXACCT(printk("Outstanding Peers : %d\n",atomic_read(&rxrpc_peer_count)));
15079 + __RXACCT(printk("Outstanding Transports : %d\n",atomic_read(&rxrpc_transport_count)));
15082 +} /* end rxrpc_cleanup() */
15083 diff -urNp linux-5240/net/rxrpc/Makefile linux-5250/net/rxrpc/Makefile
15084 --- linux-5240/net/rxrpc/Makefile 1970-01-01 01:00:00.000000000 +0100
15085 +++ linux-5250/net/rxrpc/Makefile
15088 +# Makefile for Linux kernel Rx RPC
15091 +export-objs := rxrpc_syms.o
15104 +#ifeq ($(CONFIG_PROC_FS),y)
15105 +rxrpc-objs += proc.o
15107 +#ifeq ($(CONFIG_SYSCTL),y)
15108 +rxrpc-objs += sysctl.o
15113 +# superfluous for 2.5, but needed for 2.4..
15114 +rxrpc.o: $(rxrpc-objs)
15115 + $(LD) -r -o $@ $(rxrpc-objs)
15117 +include $(TOPDIR)/Rules.make
15118 diff -urNp linux-5240/net/rxrpc/peer.c linux-5250/net/rxrpc/peer.c
15119 --- linux-5240/net/rxrpc/peer.c 1970-01-01 01:00:00.000000000 +0100
15120 +++ linux-5250/net/rxrpc/peer.c
15122 +/* peer.c: Rx RPC peer management
15124 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
15125 + * Written by David Howells (dhowells@redhat.com)
15127 + * This program is free software; you can redistribute it and/or
15128 + * modify it under the terms of the GNU General Public License
15129 + * as published by the Free Software Foundation; either version
15130 + * 2 of the License, or (at your option) any later version.
15133 +#include <linux/sched.h>
15134 +#include <linux/slab.h>
15135 +#include <linux/module.h>
15136 +#include <rxrpc/rxrpc.h>
15137 +#include <rxrpc/transport.h>
15138 +#include <rxrpc/peer.h>
15139 +#include <rxrpc/connection.h>
15140 +#include <rxrpc/call.h>
15141 +#include <rxrpc/message.h>
15142 +#include <linux/udp.h>
15143 +#include <linux/ip.h>
15144 +#include <net/sock.h>
15145 +#include <asm/uaccess.h>
15146 +#include <asm/div64.h>
15147 +#include "internal.h"
15149 +__RXACCT_DECL(atomic_t rxrpc_peer_count);
15150 +LIST_HEAD(rxrpc_peers);
15151 +DECLARE_RWSEM(rxrpc_peers_sem);
15153 +static void __rxrpc_peer_timeout(rxrpc_timer_t *timer)
15155 + struct rxrpc_peer *peer = list_entry(timer,struct rxrpc_peer,timeout);
15157 + _debug("Rx PEER TIMEOUT [%p{u=%d}]",peer,atomic_read(&peer->usage));
15159 + rxrpc_peer_do_timeout(peer);
15162 +static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = {
15163 + timed_out: __rxrpc_peer_timeout,
15166 +/*****************************************************************************/
15168 + * create a peer record
15170 +static int __rxrpc_create_peer(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer **_peer)
15172 + struct rxrpc_peer *peer;
15174 + _enter("%p,%08x",trans,ntohl(addr));
15176 + /* allocate and initialise a peer record */
15177 + peer = kmalloc(sizeof(struct rxrpc_peer),GFP_KERNEL);
15179 + _leave(" = -ENOMEM");
15183 + memset(peer,0,sizeof(struct rxrpc_peer));
15184 + atomic_set(&peer->usage,1);
15186 + INIT_LIST_HEAD(&peer->link);
15187 + INIT_LIST_HEAD(&peer->proc_link);
15188 + INIT_LIST_HEAD(&peer->conn_active);
15189 + INIT_LIST_HEAD(&peer->conn_graveyard);
15190 + spin_lock_init(&peer->conn_gylock);
15191 + init_waitqueue_head(&peer->conn_gy_waitq);
15192 + rwlock_init(&peer->conn_lock);
15193 + atomic_set(&peer->conn_count,0);
15194 + spin_lock_init(&peer->lock);
15195 + rxrpc_timer_init(&peer->timeout,&rxrpc_peer_timer_ops);
15197 + peer->addr.s_addr = addr;
15199 + peer->trans = trans;
15200 + peer->ops = trans->peer_ops;
15202 + __RXACCT(atomic_inc(&rxrpc_peer_count));
15204 + _leave(" = 0 (%p)",peer);
15207 +} /* end __rxrpc_create_peer() */
15209 +/*****************************************************************************/
15211 + * find a peer record on the specified transport
15212 + * - returns (if successful) with peer record usage incremented
15213 + * - resurrects it from the graveyard if found there
+ * - addr is used as an opaque 32-bit key (compared raw against
+ *   peer->addr.s_addr; only byte-swapped for the trace line below), so it is
+ *   presumably a network-order IPv4 address - TODO confirm against callers
15215 +int rxrpc_peer_lookup(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer **_peer)
15217 + struct rxrpc_peer *peer, *candidate = NULL;
15218 + struct list_head *_p;
15221 + _enter("%p{%hu},%08x",trans,trans->port,ntohl(addr));
15223 + /* [common case] search the transport's active list first */
15224 + read_lock(&trans->peer_lock);
15225 + list_for_each(_p,&trans->peer_active) {
15226 + peer = list_entry(_p,struct rxrpc_peer,link);
15227 + if (peer->addr.s_addr==addr)
15228 + goto found_active;
15230 + read_unlock(&trans->peer_lock);
15232 + /* [uncommon case] not active - create a candidate for a new record */
+ /* the candidate is built outside any lock; it is only committed (or
+ * discarded via kfree below) after re-checking the lists under the
+ * write lock */
15233 + ret = __rxrpc_create_peer(trans,addr,&candidate);
15235 + _leave(" = %d",ret);
15239 + /* search the active list again, just in case it appeared whilst we were busy */
15240 + write_lock(&trans->peer_lock);
15241 + list_for_each(_p,&trans->peer_active) {
15242 + peer = list_entry(_p,struct rxrpc_peer,link);
15243 + if (peer->addr.s_addr==addr)
15244 + goto found_active_second_chance;
15247 + /* search the transport's graveyard list */
15248 + spin_lock(&trans->peer_gylock);
15249 + list_for_each(_p,&trans->peer_graveyard) {
15250 + peer = list_entry(_p,struct rxrpc_peer,link);
15251 + if (peer->addr.s_addr==addr)
15252 + goto found_in_graveyard;
15254 + spin_unlock(&trans->peer_gylock);
15256 + /* we can now add the new candidate to the list
15257 + * - tell the application layer that this peer has been added
15259 + rxrpc_get_transport(trans);
15260 + peer = candidate;
15261 + candidate = NULL;
+ /* candidate is now NULL, so the kfree(candidate) on the shared success
+ * path below becomes a no-op for this route */
15263 + if (peer->ops && peer->ops->adding) {
15264 + ret = peer->ops->adding(peer);
+ /* the application layer vetoed the new peer: undo the accounting and
+ * bail out (some teardown lines are elided from this fragment) */
15266 + write_unlock(&trans->peer_lock);
15267 + __RXACCT(atomic_dec(&rxrpc_peer_count));
15269 + rxrpc_put_transport(trans);
15270 + _leave(" = %d",ret);
+ /* peer_count is decremented again by rxrpc_peer_do_timeout() when the
+ * record is finally destroyed */
15275 + atomic_inc(&trans->peer_count);
15278 + list_add_tail(&peer->link,&trans->peer_active);
15281 + write_unlock(&trans->peer_lock);
15284 + __RXACCT(atomic_dec(&rxrpc_peer_count));
15285 + kfree(candidate);
+ /* expose the peer through /proc if it isn't already listed there */
15288 + if (list_empty(&peer->proc_link)) {
15289 + down_write(&rxrpc_peers_sem);
15290 + list_add_tail(&peer->proc_link,&rxrpc_peers);
15291 + up_write(&rxrpc_peers_sem);
15297 + _leave(" = 0 (%p{u=%d cc=%d})",
15298 + peer,atomic_read(&peer->usage),atomic_read(&peer->conn_count));
15301 + /* handle the peer being found in the active list straight off */
15303 + rxrpc_get_peer(peer);
15304 + read_unlock(&trans->peer_lock);
15307 + /* handle resurrecting a peer from the graveyard */
15308 + found_in_graveyard:
15309 + rxrpc_get_peer(peer);
15310 + rxrpc_get_transport(peer->trans);
+ /* cancel the pending discard timer armed by rxrpc_put_peer() before
+ * moving the record back to the active list */
15311 + rxrpc_krxtimod_del_timer(&peer->timeout);
15312 + list_del_init(&peer->link);
15313 + spin_unlock(&trans->peer_gylock);
15314 + goto make_active;
15316 + /* handle finding the peer on the second time through the active list */
15317 + found_active_second_chance:
15318 + rxrpc_get_peer(peer);
15319 + goto success_uwfree;
15321 +} /* end rxrpc_peer_lookup() */
15323 +/*****************************************************************************/
15325 + * finish with a peer record
15326 + * - it gets sent to the graveyard from where it can be resurrected or timed out
+ * - the final reference drop is done under both trans->peer_lock and
+ *   trans->peer_gylock so that the decrement and the list move are atomic
+ *   with respect to rxrpc_peer_lookup()
15328 +void rxrpc_put_peer(struct rxrpc_peer *peer)
15330 + struct rxrpc_transport *trans = peer->trans;
15332 + _enter("%p{cc=%d a=%08x}",peer,atomic_read(&peer->conn_count),ntohl(peer->addr.s_addr));
15334 + /* sanity check */
15335 + if (atomic_read(&peer->usage)<=0)
15338 + write_lock(&trans->peer_lock);
15339 + spin_lock(&trans->peer_gylock);
15340 + if (likely(!atomic_dec_and_test(&peer->usage))) {
+ /* not the last reference - nothing further to do */
15341 + spin_unlock(&trans->peer_gylock);
15342 + write_unlock(&trans->peer_lock);
15347 + /* move to graveyard queue */
15348 + list_del(&peer->link);
15349 + write_unlock(&trans->peer_lock);
15351 + list_add_tail(&peer->link,&trans->peer_graveyard);
15353 + if (!list_empty(&peer->conn_active)) BUG();
15355 + /* discard in 100 secs (100*HZ below)
+ * NOTE(review): this comment previously said "600 secs", which does not
+ * match the 100*HZ actually armed - confirm which timeout was intended */
15356 + rxrpc_krxtimod_add_timer(&peer->timeout,100*HZ);
15358 + spin_unlock(&trans->peer_gylock);
+ /* drop the transport ref taken when the peer was made active */
15360 + rxrpc_put_transport(trans);
15362 + _leave(" [killed]");
15363 +} /* end rxrpc_put_peer() */
15365 +/*****************************************************************************/
15367 + * handle a peer timing out in the graveyard
15368 + * - called from krxtimod
+ * - also invoked directly by rxrpc_peer_clearall() when tearing down a
+ *   transport
15370 +void rxrpc_peer_do_timeout(struct rxrpc_peer *peer)
15372 + struct rxrpc_transport *trans = peer->trans;
15374 + _enter("%p{u=%d cc=%d a=%08x}",
15375 + peer,atomic_read(&peer->usage),atomic_read(&peer->conn_count),
15376 + ntohl(peer->addr.s_addr));
15378 + if (atomic_read(&peer->usage)<0)
15381 + /* remove from graveyard if still dead */
+ /* a concurrent rxrpc_peer_lookup() may have resurrected the peer (usage
+ * raised above 0) between the timer firing and this point */
15382 + spin_lock(&trans->peer_gylock);
15383 + if (atomic_read(&peer->usage)==0)
15384 + list_del_init(&peer->link);
15387 + spin_unlock(&trans->peer_gylock);
15391 + return; /* resurrected */
15394 + /* clear all connections on this peer */
15395 + rxrpc_conn_clearall(peer);
15397 + if (!list_empty(&peer->conn_active)) BUG();
15398 + if (!list_empty(&peer->conn_graveyard)) BUG();
15400 + /* inform the application layer */
15401 + if (peer->ops && peer->ops->discarding)
15402 + peer->ops->discarding(peer);
+ /* withdraw the peer from /proc */
15404 + if (!list_empty(&peer->proc_link)) {
15405 + down_write(&rxrpc_peers_sem);
15406 + list_del(&peer->proc_link);
15407 + up_write(&rxrpc_peers_sem);
15410 + __RXACCT(atomic_dec(&rxrpc_peer_count));
15413 + /* if the graveyard is now empty, wake up anyone waiting for that */
+ /* pairs with the wait loop in rxrpc_peer_clearall() */
15414 + if (atomic_dec_and_test(&trans->peer_count))
15415 + wake_up(&trans->peer_gy_waitq);
15417 + _leave(" [destroyed]");
15418 +} /* end rxrpc_peer_do_timeout() */
15420 +/*****************************************************************************/
15422 + * clear all peer records from a transport endpoint
+ * - called during transport teardown (rxrpc_put_transport); all peers are
+ *   expected to already be in the graveyard
15424 +void rxrpc_peer_clearall(struct rxrpc_transport *trans)
15426 + DECLARE_WAITQUEUE(myself,current);
15428 + struct rxrpc_peer *peer;
15431 + _enter("%p",trans);
15433 + /* there shouldn't be any active peers remaining */
15434 + if (!list_empty(&trans->peer_active))
15437 + /* manually timeout all peers in the graveyard */
15438 + spin_lock(&trans->peer_gylock);
15439 + while (!list_empty(&trans->peer_graveyard)) {
15440 + peer = list_entry(trans->peer_graveyard.next,struct rxrpc_peer,link);
15441 + _debug("Clearing peer %p\n",peer);
+ /* cancel the pending discard timer; the elided lines presumably check
+ * err before reaping, since krxtimod may already be running the
+ * timeout - TODO confirm against the full patch */
15442 + err = rxrpc_krxtimod_del_timer(&peer->timeout);
+ /* drop the lock across the reap - rxrpc_peer_do_timeout() retakes it */
15443 + spin_unlock(&trans->peer_gylock);
15446 + rxrpc_peer_do_timeout(peer);
15448 + spin_lock(&trans->peer_gylock);
15450 + spin_unlock(&trans->peer_gylock);
15452 + /* wait for the peer graveyard to be completely cleared */
15453 + set_current_state(TASK_UNINTERRUPTIBLE);
15454 + add_wait_queue(&trans->peer_gy_waitq,&myself);
15456 + while (atomic_read(&trans->peer_count)!=0) {
15458 + set_current_state(TASK_UNINTERRUPTIBLE);
15461 + remove_wait_queue(&trans->peer_gy_waitq,&myself);
15462 + set_current_state(TASK_RUNNING);
15466 +} /* end rxrpc_peer_clearall() */
15468 +/*****************************************************************************/
15470 + * calculate and cache the Round-Trip-Time for a message and its response
+ * - keeps a ring of the last RXRPC_RTT_CACHE_SIZE samples in microseconds
+ *   and recomputes peer->rtt as their average
15472 +void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
15473 + struct rxrpc_message *msg,
15474 + struct rxrpc_message *resp)
15476 + unsigned long long rtt;
15479 + _enter("%p,%p,%p",peer,msg,resp);
15481 + /* calculate the latest RTT */
+ /* (resp->stamp - msg->stamp) converted to microseconds */
15482 + rtt = resp->stamp.tv_sec - msg->stamp.tv_sec;
15483 + rtt *= 1000000UL;
15484 + rtt += resp->stamp.tv_usec - msg->stamp.tv_usec;
15486 + /* add to cache */
15487 + peer->rtt_cache[peer->rtt_point] = rtt;
15488 + peer->rtt_point++;
15489 + peer->rtt_point %= RXRPC_RTT_CACHE_SIZE;
15491 + if (peer->rtt_usage<RXRPC_RTT_CACHE_SIZE) peer->rtt_usage++;
15493 + /* recalculate RTT */
+ /* NOTE(review): rtt still holds the newest sample here and the cache
+ * already contains it, so the newest sample appears to be summed twice -
+ * verify whether rtt should be reset to 0 before this loop */
15494 + for (loop=peer->rtt_usage-1; loop>=0; loop--)
15495 + rtt += peer->rtt_cache[loop];
+ /* NOTE(review): do_div() divides rtt in place and RETURNS THE REMAINDER;
+ * assigning its return value stores the remainder, not the average -
+ * this looks like it should be "do_div(rtt, ...); peer->rtt = rtt;" */
15497 + peer->rtt = do_div(rtt,peer->rtt_usage);
15499 + _leave(" RTT=%lu.%lums",peer->rtt/1000,peer->rtt%1000);
15501 +} /* end rxrpc_peer_calculate_rtt() */
15502 diff -urNp linux-5240/net/rxrpc/proc.c linux-5250/net/rxrpc/proc.c
15503 --- linux-5240/net/rxrpc/proc.c 1970-01-01 01:00:00.000000000 +0100
15504 +++ linux-5250/net/rxrpc/proc.c
15506 +/* proc.c: /proc interface for RxRPC
15508 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
15509 + * Written by David Howells (dhowells@redhat.com)
15511 + * This program is free software; you can redistribute it and/or
15512 + * modify it under the terms of the GNU General Public License
15513 + * as published by the Free Software Foundation; either version
15514 + * 2 of the License, or (at your option) any later version.
15517 +#include <linux/sched.h>
15518 +#include <linux/slab.h>
15519 +#include <linux/module.h>
15520 +#include <linux/proc_fs.h>
15521 +#include <linux/seq_file.h>
15522 +#include <rxrpc/rxrpc.h>
15523 +#include <rxrpc/transport.h>
15524 +#include <rxrpc/peer.h>
15525 +#include <rxrpc/connection.h>
15526 +#include <rxrpc/call.h>
15527 +#include <rxrpc/message.h>
15528 +#include "internal.h"
15530 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+/* compatibility shim: 2.4 kernels have no PDE() helper, so recover the
+ * proc_dir_entry stashed in the inode's generic_ip slot */
15531 +static inline struct proc_dir_entry *PDE(const struct inode *inode)
15533 + return (struct proc_dir_entry *)inode->u.generic_ip;
+/* /proc/net/rxrpc directory and the seq_file plumbing for its four files */
15537 +static struct proc_dir_entry *proc_rxrpc;
15539 +static int rxrpc_proc_transports_open(struct inode *inode, struct file *file);
15540 +static void *rxrpc_proc_transports_start(struct seq_file *p, loff_t *pos);
15541 +static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos);
15542 +static void rxrpc_proc_transports_stop(struct seq_file *p, void *v);
15543 +static int rxrpc_proc_transports_show(struct seq_file *m, void *v);
15545 +static struct seq_operations rxrpc_proc_transports_ops = {
15546 + start: rxrpc_proc_transports_start,
15547 + next: rxrpc_proc_transports_next,
15548 + stop: rxrpc_proc_transports_stop,
15549 + show: rxrpc_proc_transports_show,
15552 +static struct file_operations rxrpc_proc_transports_fops = {
15553 + open: rxrpc_proc_transports_open,
15555 + llseek: seq_lseek,
15556 + release: seq_release,
15559 +static int rxrpc_proc_peers_open(struct inode *inode, struct file *file);
15560 +static void *rxrpc_proc_peers_start(struct seq_file *p, loff_t *pos);
15561 +static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos);
15562 +static void rxrpc_proc_peers_stop(struct seq_file *p, void *v);
15563 +static int rxrpc_proc_peers_show(struct seq_file *m, void *v);
15565 +static struct seq_operations rxrpc_proc_peers_ops = {
15566 + start: rxrpc_proc_peers_start,
15567 + next: rxrpc_proc_peers_next,
15568 + stop: rxrpc_proc_peers_stop,
15569 + show: rxrpc_proc_peers_show,
15572 +static struct file_operations rxrpc_proc_peers_fops = {
15573 + open: rxrpc_proc_peers_open,
15575 + llseek: seq_lseek,
15576 + release: seq_release,
15579 +static int rxrpc_proc_conns_open(struct inode *inode, struct file *file);
15580 +static void *rxrpc_proc_conns_start(struct seq_file *p, loff_t *pos);
15581 +static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos);
15582 +static void rxrpc_proc_conns_stop(struct seq_file *p, void *v);
15583 +static int rxrpc_proc_conns_show(struct seq_file *m, void *v);
15585 +static struct seq_operations rxrpc_proc_conns_ops = {
15586 + start: rxrpc_proc_conns_start,
15587 + next: rxrpc_proc_conns_next,
15588 + stop: rxrpc_proc_conns_stop,
15589 + show: rxrpc_proc_conns_show,
15592 +static struct file_operations rxrpc_proc_conns_fops = {
15593 + open: rxrpc_proc_conns_open,
15595 + llseek: seq_lseek,
15596 + release: seq_release,
15599 +static int rxrpc_proc_calls_open(struct inode *inode, struct file *file);
15600 +static void *rxrpc_proc_calls_start(struct seq_file *p, loff_t *pos);
15601 +static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos);
15602 +static void rxrpc_proc_calls_stop(struct seq_file *p, void *v);
15603 +static int rxrpc_proc_calls_show(struct seq_file *m, void *v);
15605 +static struct seq_operations rxrpc_proc_calls_ops = {
15606 + start: rxrpc_proc_calls_start,
15607 + next: rxrpc_proc_calls_next,
15608 + stop: rxrpc_proc_calls_stop,
15609 + show: rxrpc_proc_calls_show,
15612 +static struct file_operations rxrpc_proc_calls_fops = {
15613 + open: rxrpc_proc_calls_open,
15615 + llseek: seq_lseek,
15616 + release: seq_release,
+/* 7-character state names indexed by app_call_state / app_err_state for the
+ * "calls" proc file (initialisers elided from this fragment) */
15619 +static const char *rxrpc_call_states7[] = {
15632 +static const char *rxrpc_call_error_states7[] = {
15640 +/*****************************************************************************/
15642 + * initialise the /proc/net/rxrpc/ directory
+ * - creates calls, connections, peers and transports entries, unwinding on
+ *   failure via the error labels at the bottom
15644 +int rxrpc_proc_init(void)
15646 + struct proc_dir_entry *p;
15648 + proc_rxrpc = proc_mkdir("rxrpc",proc_net);
15651 + proc_rxrpc->owner = THIS_MODULE;
15653 + p = create_proc_entry("calls",0,proc_rxrpc);
15656 + p->proc_fops = &rxrpc_proc_calls_fops;
15657 + p->owner = THIS_MODULE;
15659 + p = create_proc_entry("connections",0,proc_rxrpc);
15661 + goto error_calls;
15662 + p->proc_fops = &rxrpc_proc_conns_fops;
15663 + p->owner = THIS_MODULE;
15665 + p = create_proc_entry("peers",0,proc_rxrpc);
+ /* NOTE(review): failure here jumps to error_calls, which appears to skip
+ * removing the already-created "connections" entry (labels are partly
+ * elided from this fragment) - verify; error_conns looks intended */
15667 + goto error_calls;
15668 + p->proc_fops = &rxrpc_proc_peers_fops;
15669 + p->owner = THIS_MODULE;
15671 + p = create_proc_entry("transports",0,proc_rxrpc);
15673 + goto error_conns;
15674 + p->proc_fops = &rxrpc_proc_transports_fops;
15675 + p->owner = THIS_MODULE;
+ /* NOTE(review): the entry was created as "connections" above, so removing
+ * "conns" here will not find it - this looks like a leak on the error
+ * path (rxrpc_proc_cleanup() uses the correct "connections" name) */
15680 + remove_proc_entry("conns",proc_rxrpc);
15682 + remove_proc_entry("calls",proc_rxrpc);
15684 + remove_proc_entry("rxrpc",proc_net);
15687 +} /* end rxrpc_proc_init() */
15689 +/*****************************************************************************/
15691 + * clean up the /proc/net/rxrpc/ directory
+ * - removes the four entries in reverse order of creation, then the
+ *   directory itself
15693 +void rxrpc_proc_cleanup(void)
15695 + remove_proc_entry("transports",proc_rxrpc);
15696 + remove_proc_entry("peers",proc_rxrpc);
15697 + remove_proc_entry("connections",proc_rxrpc);
15698 + remove_proc_entry("calls",proc_rxrpc);
15700 + remove_proc_entry("rxrpc",proc_net);
15702 +} /* end rxrpc_proc_cleanup() */
15704 +/*****************************************************************************/
15706 + * open "/proc/net/rxrpc/transports" which provides a summary of extant transports
15708 +static int rxrpc_proc_transports_open(struct inode *inode, struct file *file)
15710 + struct seq_file *m;
15713 + ret = seq_open(file,&rxrpc_proc_transports_ops);
15717 + m = file->private_data;
15718 + m->private = PDE(inode)->data;
15721 +} /* end rxrpc_proc_transports_open() */
15723 +/*****************************************************************************/
15725 + * set up the iterator to start reading from the transports list and return the first item
+ * - (void *)1 is a sentinel meaning "emit the header line"
15727 +static void *rxrpc_proc_transports_start(struct seq_file *m, loff_t *_pos)
15729 + struct list_head *_p;
15730 + loff_t pos = *_pos;
15732 + /* lock the list against modification */
+ /* released in rxrpc_proc_transports_stop() */
15733 + down_read(&rxrpc_proc_transports_sem);
15735 + /* allow for the header line */
15737 + return (void *)1;
15740 + /* find the n'th element in the list */
15741 + list_for_each(_p,&rxrpc_proc_transports)
15745 + return _p!=&rxrpc_proc_transports ? _p : NULL;
15746 +} /* end rxrpc_proc_transports_start() */
15748 +/*****************************************************************************/
15750 + * move to next call in transports list
15752 +static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos)
15754 + struct list_head *_p;
+ /* NOTE(review): _p is read before any visible assignment on the non-header
+ * path; an "_p = v" line appears to have been elided from this fragment -
+ * verify against the full patch */
15759 + _p = v==(void*)1 ? rxrpc_proc_transports.next : _p->next;
15761 + return _p!=&rxrpc_proc_transports ? _p : NULL;
15762 +} /* end rxrpc_proc_transports_next() */
15764 +/*****************************************************************************/
15766 + * clean up after reading from the transports list
15768 +static void rxrpc_proc_transports_stop(struct seq_file *p, void *v)
15770 + up_read(&rxrpc_proc_transports_sem);
15772 +} /* end rxrpc_proc_transports_stop() */
15774 +/*****************************************************************************/
15776 + * display a header line followed by a load of call lines
15778 +static int rxrpc_proc_transports_show(struct seq_file *m, void *v)
15780 + struct rxrpc_transport *trans = list_entry(v,struct rxrpc_transport,proc_link);
15782 + /* display header on line 1 */
+ /* (the list_entry above is harmless for v==1: trans is not dereferenced
+ * on the header path) */
15783 + if (v == (void *)1) {
15784 + seq_puts(m, "LOCAL USE\n");
15788 + /* display one transport per line on subsequent lines */
15789 + seq_printf(m,"%5hu %3d\n",
15791 + atomic_read(&trans->usage)
15795 +} /* end rxrpc_proc_transports_show() */
15797 +/*****************************************************************************/
15799 + * open "/proc/net/rxrpc/peers" which provides a summary of extant peers
15801 +static int rxrpc_proc_peers_open(struct inode *inode, struct file *file)
15803 + struct seq_file *m;
15806 + ret = seq_open(file,&rxrpc_proc_peers_ops);
15810 + m = file->private_data;
15811 + m->private = PDE(inode)->data;
15814 +} /* end rxrpc_proc_peers_open() */
15816 +/*****************************************************************************/
15818 + * set up the iterator to start reading from the peers list and return the first item
+ * - (void *)1 is a sentinel meaning "emit the header line"
15820 +static void *rxrpc_proc_peers_start(struct seq_file *m, loff_t *_pos)
15822 + struct list_head *_p;
15823 + loff_t pos = *_pos;
15825 + /* lock the list against modification */
+ /* released in rxrpc_proc_peers_stop() */
15826 + down_read(&rxrpc_peers_sem);
15828 + /* allow for the header line */
15830 + return (void *)1;
15833 + /* find the n'th element in the list */
15834 + list_for_each(_p,&rxrpc_peers)
15838 + return _p!=&rxrpc_peers ? _p : NULL;
15839 +} /* end rxrpc_proc_peers_start() */
15841 +/*****************************************************************************/
15843 + * move to next conn in peers list
15845 +static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos)
15847 + struct list_head *_p;
+ /* NOTE(review): an "_p = v" assignment appears to be elided from this
+ * fragment before _p->next is read - verify against the full patch */
15852 + _p = v==(void*)1 ? rxrpc_peers.next : _p->next;
15854 + return _p!=&rxrpc_peers ? _p : NULL;
15855 +} /* end rxrpc_proc_peers_next() */
15857 +/*****************************************************************************/
15859 + * clean up after reading from the peers list
15861 +static void rxrpc_proc_peers_stop(struct seq_file *p, void *v)
15863 + up_read(&rxrpc_peers_sem);
15865 +} /* end rxrpc_proc_peers_stop() */
15867 +/*****************************************************************************/
15869 + * display a header line followed by a load of conn lines
15871 +static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
15873 + struct rxrpc_peer *peer = list_entry(v,struct rxrpc_peer,proc_link);
15874 + signed long timeout;
15876 + /* display header on line 1 */
15877 + if (v == (void *)1) {
15878 + seq_puts(m,"LOCAL REMOTE USAGE CONNS TIMEOUT MTU RTT(uS)\n")
15882 + /* display one peer per line on subsequent lines */
+ /* remaining graveyard timeout in jiffies; the "not queued" default for
+ * timeout is set on a line elided from this fragment */
15884 + if (!list_empty(&peer->timeout.link))
15885 + timeout = (signed long)peer->timeout.timo_jif - (signed long)jiffies;
15887 + seq_printf(m,"%5hu %08x %5d %5d %8ld %5u %7lu\n",
15888 + peer->trans->port,
15889 + ntohl(peer->addr.s_addr),
15890 + atomic_read(&peer->usage),
15891 + atomic_read(&peer->conn_count),
15898 +} /* end rxrpc_proc_peers_show() */
15900 +/*****************************************************************************/
15902 + * open "/proc/net/rxrpc/connections" which provides a summary of extant connections
15904 +static int rxrpc_proc_conns_open(struct inode *inode, struct file *file)
15906 + struct seq_file *m;
15909 + ret = seq_open(file,&rxrpc_proc_conns_ops);
15913 + m = file->private_data;
15914 + m->private = PDE(inode)->data;
15917 +} /* end rxrpc_proc_conns_open() */
15919 +/*****************************************************************************/
15921 + * set up the iterator to start reading from the conns list and return the first item
+ * - (void *)1 is a sentinel meaning "emit the header line"
15923 +static void *rxrpc_proc_conns_start(struct seq_file *m, loff_t *_pos)
15925 + struct list_head *_p;
15926 + loff_t pos = *_pos;
15928 + /* lock the list against modification */
+ /* released in rxrpc_proc_conns_stop() */
15929 + down_read(&rxrpc_conns_sem);
15931 + /* allow for the header line */
15933 + return (void *)1;
15936 + /* find the n'th element in the list */
15937 + list_for_each(_p,&rxrpc_conns)
15941 + return _p!=&rxrpc_conns ? _p : NULL;
15942 +} /* end rxrpc_proc_conns_start() */
15944 +/*****************************************************************************/
15946 + * move to next conn in conns list
15948 +static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos)
15950 + struct list_head *_p;
+ /* NOTE(review): an "_p = v" assignment appears to be elided from this
+ * fragment before _p->next is read - verify against the full patch */
15955 + _p = v==(void*)1 ? rxrpc_conns.next : _p->next;
15957 + return _p!=&rxrpc_conns ? _p : NULL;
15958 +} /* end rxrpc_proc_conns_next() */
15960 +/*****************************************************************************/
15962 + * clean up after reading from the conns list
15964 +static void rxrpc_proc_conns_stop(struct seq_file *p, void *v)
15966 + up_read(&rxrpc_conns_sem);
15968 +} /* end rxrpc_proc_conns_stop() */
15970 +/*****************************************************************************/
15972 + * display a header line followed by a load of conn lines
15974 +static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
15976 + struct rxrpc_connection *conn = list_entry(v,struct rxrpc_connection,proc_link);
15977 + signed long timeout;
15979 + /* display header on line 1 */
15980 + if (v == (void *)1) {
15982 + "LOCAL REMOTE RPORT SRVC CONN END SERIALNO CALLNO MTU TIMEOUT"
15987 + /* display one conn per line on subsequent lines */
+ /* remaining graveyard timeout in jiffies, as for the peers file */
15989 + if (!list_empty(&conn->timeout.link))
15990 + timeout = (signed long)conn->timeout.timo_jif - (signed long)jiffies;
15992 + seq_printf(m,"%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5u %8ld\n",
15993 + conn->trans->port,
15994 + ntohl(conn->addr.sin_addr.s_addr),
15995 + ntohs(conn->addr.sin_port),
15996 + ntohs(conn->service_id),
15997 + ntohl(conn->conn_id),
15998 + conn->out_clientflag ? "CLT" : "SRV",
15999 + conn->serial_counter,
16000 + conn->call_counter,
16006 +} /* end rxrpc_proc_conns_show() */
16008 +/*****************************************************************************/
16010 + * open "/proc/net/rxrpc/calls" which provides a summary of extant calls
16012 +static int rxrpc_proc_calls_open(struct inode *inode, struct file *file)
16014 + struct seq_file *m;
16017 + ret = seq_open(file,&rxrpc_proc_calls_ops);
16021 + m = file->private_data;
16022 + m->private = PDE(inode)->data;
16025 +} /* end rxrpc_proc_calls_open() */
16027 +/*****************************************************************************/
16029 + * set up the iterator to start reading from the calls list and return the first item
+ * - (void *)1 is a sentinel meaning "emit the header line"
16031 +static void *rxrpc_proc_calls_start(struct seq_file *m, loff_t *_pos)
16033 + struct list_head *_p;
16034 + loff_t pos = *_pos;
16036 + /* lock the list against modification */
+ /* released in rxrpc_proc_calls_stop() */
16037 + down_read(&rxrpc_calls_sem);
16039 + /* allow for the header line */
16041 + return (void *)1;
16044 + /* find the n'th element in the list */
16045 + list_for_each(_p,&rxrpc_calls)
16049 + return _p!=&rxrpc_calls ? _p : NULL;
16050 +} /* end rxrpc_proc_calls_start() */
16052 +/*****************************************************************************/
16054 + * move to next call in calls list
16056 +static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos)
16058 + struct list_head *_p;
+ /* NOTE(review): an "_p = v" assignment appears to be elided from this
+ * fragment before _p->next is read - verify against the full patch */
16063 + _p = v==(void*)1 ? rxrpc_calls.next : _p->next;
16065 + return _p!=&rxrpc_calls ? _p : NULL;
16066 +} /* end rxrpc_proc_calls_next() */
16068 +/*****************************************************************************/
16070 + * clean up after reading from the calls list
16072 +static void rxrpc_proc_calls_stop(struct seq_file *p, void *v)
16074 + up_read(&rxrpc_calls_sem);
16076 +} /* end rxrpc_proc_calls_stop() */
16078 +/*****************************************************************************/
16080 + * display a header line followed by a load of call lines
16082 +static int rxrpc_proc_calls_show(struct seq_file *m, void *v)
16084 + struct rxrpc_call *call = list_entry(v,struct rxrpc_call,call_link);
16086 + /* display header on line 1 */
16087 + if (v == (void *)1) {
16089 + "LOCAL REMOT SRVC CONN CALL DIR USE "
16090 + " L STATE OPCODE ABORT ERRNO\n"
16095 + /* display one call per line on subsequent lines */
16097 + "%5hu %5hu %04hx %08x %08x %s %3u%c"
16098 + " %c %-7.7s %6d %08x %5d\n",
16099 + call->conn->trans->port,
16100 + ntohs(call->conn->addr.sin_port),
16101 + ntohs(call->conn->service_id),
16102 + ntohl(call->conn->conn_id),
16103 + ntohl(call->call_id),
16104 + call->conn->service ? "SVC" : "CLT",
16105 + atomic_read(&call->usage),
16106 + waitqueue_active(&call->waitq) ? 'w' : ' ',
16107 + call->app_last_rcv ? 'Y' : '-',
+ /* pick the 7-char state name from the normal or the error state table */
16108 + (call->app_call_state!=RXRPC_CSTATE_ERROR ?
16109 + rxrpc_call_states7[call->app_call_state] :
16110 + rxrpc_call_error_states7[call->app_err_state]),
16111 + call->app_opcode,
16112 + call->app_abort_code,
16117 +} /* end rxrpc_proc_calls_show() */
16118 diff -urNp linux-5240/net/rxrpc/rxrpc_syms.c linux-5250/net/rxrpc/rxrpc_syms.c
16119 --- linux-5240/net/rxrpc/rxrpc_syms.c 1970-01-01 01:00:00.000000000 +0100
16120 +++ linux-5250/net/rxrpc/rxrpc_syms.c
16122 +/* rxrpc_syms.c: exported Rx RPC layer interface symbols
16124 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
16125 + * Written by David Howells (dhowells@redhat.com)
16127 + * This program is free software; you can redistribute it and/or
16128 + * modify it under the terms of the GNU General Public License
16129 + * as published by the Free Software Foundation; either version
16130 + * 2 of the License, or (at your option) any later version.
16133 +#include <linux/config.h>
16134 +#include <linux/module.h>
16136 +#include <rxrpc/transport.h>
16137 +#include <rxrpc/connection.h>
16138 +#include <rxrpc/call.h>
16139 +#include <rxrpc/krxiod.h>
+/* call.c - tunables, state tables and the call API */
16142 +EXPORT_SYMBOL(rxrpc_call_rcv_timeout);
16143 +EXPORT_SYMBOL(rxrpc_call_acks_timeout);
16144 +EXPORT_SYMBOL(rxrpc_call_dfr_ack_timeout);
16145 +EXPORT_SYMBOL(rxrpc_call_max_resend);
16146 +EXPORT_SYMBOL(rxrpc_call_states);
16147 +EXPORT_SYMBOL(rxrpc_call_error_states);
16149 +EXPORT_SYMBOL(rxrpc_create_call);
16150 +EXPORT_SYMBOL(rxrpc_incoming_call);
16151 +EXPORT_SYMBOL(rxrpc_put_call);
16152 +EXPORT_SYMBOL(rxrpc_call_abort);
16153 +EXPORT_SYMBOL(rxrpc_call_read_data);
16154 +EXPORT_SYMBOL(rxrpc_call_write_data);
16155 +EXPORT_SYMBOL(rxrpc_call_flush);
16157 +/* connection.c */
16158 +EXPORT_SYMBOL(rxrpc_create_connection);
16159 +EXPORT_SYMBOL(rxrpc_put_connection);
+/* debug control variables (presumably defined in main.c - verify) */
16162 +EXPORT_SYMBOL(rxrpc_ktrace);
16163 +EXPORT_SYMBOL(rxrpc_kdebug);
16164 +EXPORT_SYMBOL(rxrpc_kproto);
16165 +EXPORT_SYMBOL(rxrpc_knet);
+/* transport.c */
16168 +EXPORT_SYMBOL(rxrpc_create_transport);
16169 +EXPORT_SYMBOL(rxrpc_clear_transport);
16170 +EXPORT_SYMBOL(rxrpc_put_transport);
16171 +EXPORT_SYMBOL(rxrpc_add_service);
16172 +EXPORT_SYMBOL(rxrpc_del_service);
16173 diff -urNp linux-5240/net/rxrpc/sysctl.c linux-5250/net/rxrpc/sysctl.c
16174 --- linux-5240/net/rxrpc/sysctl.c 1970-01-01 01:00:00.000000000 +0100
16175 +++ linux-5250/net/rxrpc/sysctl.c
16177 +/* sysctl.c: Rx RPC control
16179 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
16180 + * Written by David Howells (dhowells@redhat.com)
16182 + * This program is free software; you can redistribute it and/or
16183 + * modify it under the terms of the GNU General Public License
16184 + * as published by the Free Software Foundation; either version
16185 + * 2 of the License, or (at your option) any later version.
16188 +#include <linux/config.h>
16189 +#include <linux/sched.h>
16190 +#include <linux/slab.h>
16191 +#include <linux/module.h>
16192 +#include <linux/sysctl.h>
16193 +#include <linux/config.h>
16194 +#include <rxrpc/types.h>
16195 +#include <rxrpc/rxrpc.h>
16196 +#include <asm/errno.h>
16197 +#include "internal.h"
16204 +#ifdef CONFIG_SYSCTL
16205 +static struct ctl_table_header *rxrpc_sysctl = NULL;
+/* /proc/sys/net/rxrpc/{kdebug,ktrace,kproto,knet} - integer debug knobs
+ * (old-style static-binary-number ctl_table entries; terminating null
+ * entries elided from this fragment) */
16207 +static ctl_table rxrpc_sysctl_table[] = {
16208 + { 1, "kdebug", &rxrpc_kdebug, sizeof(int), 0644, NULL, &proc_dointvec },
16209 + { 2, "ktrace", &rxrpc_ktrace, sizeof(int), 0644, NULL, &proc_dointvec },
16210 + { 3, "kproto", &rxrpc_kproto, sizeof(int), 0644, NULL, &proc_dointvec },
16211 + { 4, "knet", &rxrpc_knet, sizeof(int), 0644, NULL, &proc_dointvec },
16215 +static ctl_table rxrpc_dir_sysctl_table[] = {
16216 + { 1, "rxrpc", NULL, 0, 0555, rxrpc_sysctl_table },
16219 +#endif /* CONFIG_SYSCTL */
16221 +/*****************************************************************************/
16223 + * initialise the sysctl stuff for Rx RPC
+ * - compiles to a no-op success when CONFIG_SYSCTL is off
16225 +int rxrpc_sysctl_init(void)
16227 +#ifdef CONFIG_SYSCTL
+ /* two-argument legacy API: second arg is insert_at_head */
16228 + rxrpc_sysctl = register_sysctl_table(rxrpc_dir_sysctl_table,0);
16229 + if (!rxrpc_sysctl)
16231 +#endif /* CONFIG_SYSCTL */
16234 +} /* end rxrpc_sysctl_init() */
16236 +/*****************************************************************************/
16238 + * clean up the sysctl stuff for Rx RPC
16240 +void rxrpc_sysctl_cleanup(void)
16242 +#ifdef CONFIG_SYSCTL
16243 + if (rxrpc_sysctl) {
16244 + unregister_sysctl_table(rxrpc_sysctl);
+ /* defend against double-cleanup */
16245 + rxrpc_sysctl = NULL;
16247 +#endif /* CONFIG_SYSCTL */
16249 +} /* end rxrpc_sysctl_cleanup() */
16250 diff -urNp linux-5240/net/rxrpc/transport.c linux-5250/net/rxrpc/transport.c
16251 --- linux-5240/net/rxrpc/transport.c 1970-01-01 01:00:00.000000000 +0100
16252 +++ linux-5250/net/rxrpc/transport.c
16254 +/* transport.c: Rx Transport routines
16256 + * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
16257 + * Written by David Howells (dhowells@redhat.com)
16259 + * This program is free software; you can redistribute it and/or
16260 + * modify it under the terms of the GNU General Public License
16261 + * as published by the Free Software Foundation; either version
16262 + * 2 of the License, or (at your option) any later version.
16265 +#include <linux/sched.h>
16266 +#include <linux/slab.h>
16267 +#include <linux/module.h>
16268 +#include <rxrpc/transport.h>
16269 +#include <rxrpc/peer.h>
16270 +#include <rxrpc/connection.h>
16271 +#include <rxrpc/call.h>
16272 +#include <rxrpc/message.h>
16273 +#include <rxrpc/krxiod.h>
16274 +#include <rxrpc/krxsecd.h>
16275 +#include <linux/udp.h>
16276 +#include <linux/in.h>
16277 +#include <linux/in6.h>
16278 +#include <linux/icmp.h>
16279 +#include <net/sock.h>
16280 +#include <net/ip.h>
16281 +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
16282 +#include <linux/ipv6.h> /* this should _really_ be in errqueue.h.. */
16284 +#include <linux/errqueue.h>
16285 +#include <asm/uaccess.h>
16286 +#include <asm/checksum.h>
16287 +#include "internal.h"
+/* fragment of a control-message buffer structure for receiving ICMP error
+ * reports via MSG_ERRQUEUE (the struct's opening line is elided from this
+ * fragment - verify its name against the full patch) */
16290 + struct cmsghdr cmsg; /* control message header */
16291 + struct sock_extended_err ee; /* extended error information */
16292 + struct sockaddr_in icmp_src; /* ICMP packet source address */
+/* registry of all live transport endpoints */
16295 +static spinlock_t rxrpc_transports_lock = SPIN_LOCK_UNLOCKED;
16296 +static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports);
16298 +__RXACCT_DECL(atomic_t rxrpc_transport_count);
+/* list consumed by /proc/net/rxrpc/transports */
16299 +LIST_HEAD(rxrpc_proc_transports);
16300 +DECLARE_RWSEM(rxrpc_proc_transports_sem);
16302 +static void rxrpc_data_ready(struct sock *sk, int count);
16303 +static void rxrpc_error_report(struct sock *sk);
16304 +static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
16305 + struct list_head *msgq);
16306 +static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans);
16308 +/*****************************************************************************/
16310 + * create a new transport endpoint using the specified UDP port
+ * - allocates the rxrpc_transport, binds an in-kernel UDP socket to the port,
+ *   enables ICMP error reception and hooks the socket callbacks
+ * - on failure the error path drops through rxrpc_put_transport() to unwind
16312 +int rxrpc_create_transport(unsigned short port, struct rxrpc_transport **_trans)
16314 + struct rxrpc_transport *trans;
16315 + struct sockaddr_in sin;
16316 + mm_segment_t oldfs;
16317 + struct sock *sock;
16320 + _enter("%hu",port);
16322 + trans = kmalloc(sizeof(struct rxrpc_transport),GFP_KERNEL);
16326 + memset(trans,0,sizeof(struct rxrpc_transport));
16327 + atomic_set(&trans->usage,1);
16328 + INIT_LIST_HEAD(&trans->services);
16329 + INIT_LIST_HEAD(&trans->link);
16330 + INIT_LIST_HEAD(&trans->krxiodq_link);
16331 + spin_lock_init(&trans->lock);
16332 + INIT_LIST_HEAD(&trans->peer_active);
16333 + INIT_LIST_HEAD(&trans->peer_graveyard);
16334 + spin_lock_init(&trans->peer_gylock);
16335 + init_waitqueue_head(&trans->peer_gy_waitq);
16336 + rwlock_init(&trans->peer_lock);
16337 + atomic_set(&trans->peer_count,0);
16338 + trans->port = port;
16340 + /* create a UDP socket to be my actual transport endpoint */
16341 + ret = sock_create(PF_INET,SOCK_DGRAM,IPPROTO_UDP,&trans->socket);
16345 + /* use the specified port */
16347 + memset(&sin,0,sizeof(sin));
16348 + sin.sin_family = AF_INET;
16349 + sin.sin_port = htons(port);
16350 + ret = trans->socket->ops->bind(trans->socket,(struct sockaddr *)&sin,sizeof(sin));
+ /* enable IP_RECVERR so ICMP errors are queued on the socket's error
+ * queue; set_fs(KERNEL_DS) lets setsockopt accept a kernel pointer.
+ * NOTE(review): the declaration/initialisation of 'opt' is elided from
+ * this fragment - verify it is set (presumably to 1) before use */
16356 + oldfs = get_fs();
16357 + set_fs(KERNEL_DS);
16358 + ret = trans->socket->ops->setsockopt(trans->socket,SOL_IP,IP_RECVERR,
16359 + (char*)&opt,sizeof(opt));
16362 + spin_lock(&rxrpc_transports_lock);
16363 + list_add(&trans->link,&rxrpc_transports);
16364 + spin_unlock(&rxrpc_transports_lock);
16366 + /* set the socket up */
+ /* legacy (pre-sk_ prefix) struct sock callback fields */
16367 + sock = trans->socket->sk;
16368 + sock->user_data = trans;
16369 + sock->data_ready = rxrpc_data_ready;
16370 + sock->error_report = rxrpc_error_report;
16372 + down_write(&rxrpc_proc_transports_sem);
16373 + list_add_tail(&trans->proc_link,&rxrpc_proc_transports);
16374 + up_write(&rxrpc_proc_transports_sem);
16376 + __RXACCT(atomic_inc(&rxrpc_transport_count));
16379 + _leave(" = 0 (%p)",trans);
+ /* error path: put_transport unwinds everything done so far */
16383 + rxrpc_put_transport(trans);
16385 + _leave(" = %d",ret);
16389 +} /* end rxrpc_create_transport() */
16391 +/*****************************************************************************/
16393 + * clear the connections on a transport endpoint
+ * - currently an empty stub (body not implemented in this patch)
16395 +void rxrpc_clear_transport(struct rxrpc_transport *trans)
16397 + //struct rxrpc_connection *conn;
16399 +} /* end rxrpc_clear_transport() */
16401 +/*****************************************************************************/
16403 + * destroy a transport endpoint
/* Drop a reference on the transport; on the final put, unlink it from the
 * global list, shut down and release its UDP socket, purge peer records and
 * remove it from the /proc list.
 * NOTE(review): this hunk is part of a patch with interior lines elided
 * (diff numbers jump, e.g. 16409 -> 16412, 16415 -> 16420), so the bodies of
 * the usage<=0 check and the early-return branch are not visible here. */
16405 +void rxrpc_put_transport(struct rxrpc_transport *trans)
16407 + _enter("%p{u=%d p=%hu}",trans,atomic_read(&trans->usage),trans->port);
/* sanity check: a put on a dead transport (usage already <= 0) — the
 * reaction (presumably BUG/return) is on an elided line. */
16409 + if (atomic_read(&trans->usage)<=0)
/* the decrement and the list removal are both done under
 * rxrpc_transports_lock so that a concurrent lookup cannot find a
 * transport whose count has already dropped to zero */
16412 + /* to prevent a race, the decrement and the dequeue must be effectively atomic */
16413 + spin_lock(&rxrpc_transports_lock);
16414 + if (likely(!atomic_dec_and_test(&trans->usage))) {
16415 + spin_unlock(&rxrpc_transports_lock);
16420 + list_del(&trans->link);
16421 + spin_unlock(&rxrpc_transports_lock);
/* last reference gone: stop further Rx traffic, then detach from the
 * krxsecd/krxiod service threads before freeing state */
16423 + /* finish cleaning up the transport */
16424 + if (trans->socket)
16425 + trans->socket->ops->shutdown(trans->socket,2);
16427 + rxrpc_krxsecd_clear_transport(trans);
16428 + rxrpc_krxiod_dequeue_transport(trans);
16430 + /* discard all peer information */
16431 + rxrpc_peer_clearall(trans);
16433 + down_write(&rxrpc_proc_transports_sem);
16434 + list_del(&trans->proc_link);
16435 + up_write(&rxrpc_proc_transports_sem);
16436 + __RXACCT(atomic_dec(&rxrpc_transport_count));
/* break the sk -> transport backpointer before releasing the socket so
 * the data_ready/error_report callbacks cannot chase a stale pointer */
16438 + /* close the socket */
16439 + if (trans->socket) {
16440 + trans->socket->sk->user_data = NULL;
16441 + sock_release(trans->socket);
16442 + trans->socket = NULL;
16449 +} /* end rxrpc_put_transport() */
16451 +/*****************************************************************************/
16453 + * add a service to a transport to be listened upon
/* Register newsrv on trans so incoming calls for its service ID are
 * accepted.  Returns 0 on success (path elided from this hunk) or -EEXIST
 * if a service with the same ID is already registered.  Takes a reference
 * on the transport that rxrpc_del_service() releases.
 * NOTE(review): the duplicate-found branch body and the success-path
 * ret=0/goto lines fall in elided diff lines (16468 -> 16472,
 * 16474 -> 16478) — confirm against the full patch. */
16455 +int rxrpc_add_service(struct rxrpc_transport *trans, struct rxrpc_service *newsrv)
16457 + struct rxrpc_service *srv;
16458 + struct list_head *_p;
16459 + int ret = -EEXIST;
16461 + _enter("%p{%hu},%p{%hu}",trans,trans->port,newsrv,newsrv->service_id);
16463 + /* verify that the service ID is not already present */
16464 + spin_lock(&trans->lock);
16466 + list_for_each(_p,&trans->services) {
16467 + srv = list_entry(_p,struct rxrpc_service,link);
16468 + if (srv->service_id==newsrv->service_id)
16472 + /* okay - add the transport to the list */
16473 + list_add_tail(&newsrv->link,&trans->services);
/* pin the transport for as long as the service is registered */
16474 + rxrpc_get_transport(trans);
16478 + spin_unlock(&trans->lock);
16480 + _leave("= %d",ret);
16483 +} /* end rxrpc_add_service() */
16485 +/*****************************************************************************/
16487 + * remove a service from a transport
/* Unlink srv from the transport's service list under the transport lock,
 * then drop the transport reference taken by rxrpc_add_service(). */
16489 +void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
16491 + _enter("%p{%hu},%p{%hu}",trans,trans->port,srv,srv->service_id);
16493 + spin_lock(&trans->lock);
16494 + list_del(&srv->link);
16495 + spin_unlock(&trans->lock);
16497 + rxrpc_put_transport(trans);
16501 +} /* end rxrpc_del_service() */
16503 +/*****************************************************************************/
16505 + * INET callback when data has been received on the socket.
/* Runs in softirq context from the networking core: recover the owning
 * transport from sk->user_data, hand it to the krxiod daemon for actual
 * packet processing, and wake any sleeper on the socket's waitqueue. */
16507 +static void rxrpc_data_ready(struct sock *sk, int count)
16509 + struct rxrpc_transport *trans;
16511 + _enter("%p{t=%p},%d",sk,sk->user_data,count);
16513 + /* queue the transport for attention by krxiod */
16514 + trans = (struct rxrpc_transport *) sk->user_data;
/* NOTE(review): no NULL check on trans here — presumably safe because the
 * backpointer is cleared before sock_release in rxrpc_put_transport, but
 * an elided line may guard it; verify in the full patch. */
16516 + rxrpc_krxiod_queue_transport(trans);
16518 + /* wake up anyone waiting on the socket */
16519 + if (sk->sleep && waitqueue_active(sk->sleep))
16520 + wake_up_interruptible(sk->sleep);
16524 +} /* end rxrpc_data_ready() */
16526 +/*****************************************************************************/
16528 + * INET callback when an ICMP error packet is received
16529 + * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE)
/* Same shape as rxrpc_data_ready(), but additionally latches
 * trans->error_rcvd so that rxrpc_trans_receive_packet() will drain the
 * socket error queue before reading data packets. */
16531 +static void rxrpc_error_report(struct sock *sk)
16533 + struct rxrpc_transport *trans;
16535 + _enter("%p{t=%p}",sk,sk->user_data);
16537 + /* queue the transport for attention by krxiod */
16538 + trans = (struct rxrpc_transport *) sk->user_data;
/* plain store of a flag read by krxiod; ordering relies on the queueing
 * call below — NOTE(review): confirm no barrier was on an elided line */
16540 + trans->error_rcvd = 1;
16541 + rxrpc_krxiod_queue_transport(trans);
16544 + /* wake up anyone waiting on the socket */
16545 + if (sk->sleep && waitqueue_active(sk->sleep))
16546 + wake_up_interruptible(sk->sleep);
16550 +} /* end rxrpc_error_report() */
16552 +/*****************************************************************************/
16554 + * split a message up, allocating message records and filling them in from the contents of a
/* Parse one received UDP packet into one or more rxrpc_message records
 * appended to *msgq.  A jumbo data packet (RXRPC_JUMBO_PACKET flag) is
 * split into per-segment messages, each RXRPC_JUMBO_DATALEN long except
 * the last.  Returns 0 on success; on failure all messages queued so far
 * are unlinked and released.
 * NOTE(review): interior diff lines are elided (e.g. the NULL checks after
 * both kmallocs, the skb_get/attach lines at 16591/16637-ish, and the
 * error-label lines), so the error-path structure is only partly visible. */
16557 +static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
16558 + struct sk_buff *pkt,
16559 + struct list_head *msgq)
16561 + struct rxrpc_message *msg;
16566 + msg = kmalloc(sizeof(struct rxrpc_message),GFP_KERNEL);
16568 + _leave(" = -ENOMEM");
16572 + memset(msg,0,sizeof(*msg));
16573 + atomic_set(&msg->usage,1);
/* queue immediately so the common error path below can free it */
16574 + list_add_tail(&msg->link,msgq);
16576 + /* dig out the Rx routing parameters */
16577 + if (skb_copy_bits(pkt,sizeof(struct udphdr),&msg->hdr,sizeof(msg->hdr))<0) {
16582 + msg->trans = trans;
16583 + msg->state = RXRPC_MSG_RECEIVED;
16584 + msg->stamp = pkt->stamp;
16585 + msg->seq = ntohl(msg->hdr.seq);
16587 + /* attach the packet */
/* payload begins after the UDP header and the wire rxrpc header */
16591 + msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header);
16592 + msg->dsize = msg->pkt->len - msg->offset;
16594 + _net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
16595 + msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
16596 + ntohl(msg->hdr.epoch),
16597 + (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
16598 + ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
16599 + ntohl(msg->hdr.callNumber),
16600 + rxrpc_pkts[msg->hdr.type],
16602 + ntohs(msg->hdr.serviceId),
16603 + msg->hdr.securityIndex);
16605 + __RXACCT(atomic_inc(&rxrpc_message_count));
16607 + /* split off jumbo packets */
/* each iteration peels one RXRPC_JUMBO_DATALEN segment off the front of
 * the current message and starts a new message for the remainder */
16608 + while (msg->hdr.type==RXRPC_PACKET_TYPE_DATA && msg->hdr.flags & RXRPC_JUMBO_PACKET) {
16609 + struct rxrpc_jumbo_header jumbo;
16610 + struct rxrpc_message *jumbomsg = msg;
16612 + _debug("split jumbo packet");
16614 + /* quick sanity check */
/* must hold at least one full segment plus the secondary header;
 * a jumbo packet cannot also be the last packet of the call */
16616 + if (msg->dsize < RXRPC_JUMBO_DATALEN+sizeof(struct rxrpc_jumbo_header))
16618 + if (msg->hdr.flags & RXRPC_LAST_PACKET)
16621 + /* dig out the secondary header */
16622 + if (skb_copy_bits(pkt,msg->offset+RXRPC_JUMBO_DATALEN,&jumbo,sizeof(jumbo))<0)
16625 + /* allocate a new message record */
16627 + msg = kmalloc(sizeof(struct rxrpc_message),GFP_KERNEL);
/* clone routing info from the segment just completed */
16631 + memcpy(msg,jumbomsg,sizeof(*msg));
16632 + list_add_tail(&msg->link,msgq);
16634 + /* adjust the jumbo packet */
16635 + jumbomsg->dsize = RXRPC_JUMBO_DATALEN;
16637 + /* attach the packet here too */
16640 + /* adjust the parameters */
/* next segment: bump seq/serial, advance past the segment and its
 * jumbo header, and take flags/_rsvd from the secondary header */
16642 + msg->hdr.seq = htonl(msg->seq);
16643 + msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1);
16644 + msg->offset += RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header);
16645 + msg->dsize -= RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header);
16646 + msg->hdr.flags = jumbo.flags;
16647 + msg->hdr._rsvd = jumbo._rsvd;
16649 + _net("Rx Split jumbo packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
16650 + msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
16651 + ntohl(msg->hdr.epoch),
16652 + (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
16653 + ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
16654 + ntohl(msg->hdr.callNumber),
16655 + rxrpc_pkts[msg->hdr.type],
16657 + ntohs(msg->hdr.serviceId),
16658 + msg->hdr.securityIndex);
16660 + __RXACCT(atomic_inc(&rxrpc_message_count));
16663 + _leave(" = 0 #%d",atomic_read(&rxrpc_message_count));
/* error path (label elided): release everything queued on msgq so far */
16667 + while (!list_empty(msgq)) {
16668 + msg = list_entry(msgq->next,struct rxrpc_message,link);
16669 + list_del_init(&msg->link);
16671 + rxrpc_put_message(msg);
16674 + _leave(" = %d",ret);
16676 +} /* end rxrpc_incoming_msg() */
16678 +/*****************************************************************************/
16680 + * accept a new call
16681 + * - called from krxiod in process context
/* Main receive path for a transport: drain any pending ICMP error report,
 * pull one datagram off the socket, checksum it, split it into messages,
 * route them to the peer/connection they belong to, and either start a new
 * server call (first DATA packet, seq 1, client-initiated) or feed the
 * packets into the existing call.
 * NOTE(review): loop structure, local declarations (msgq, addr, port) and
 * several error labels fall on elided diff lines; the requeue-on-EAGAIN /
 * retry shape cannot be fully confirmed from this hunk. */
16683 +void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
16685 + struct rxrpc_message *msg;
16686 + struct rxrpc_peer *peer;
16687 + struct sk_buff *pkt;
16694 + _enter("%p{%d}",trans,trans->port);
16697 + /* deal with outstanting errors first */
16698 + if (trans->error_rcvd)
16699 + rxrpc_trans_receive_error_report(trans);
16701 + /* attempt to receive a packet */
/* non-blocking (flags=1 => MSG_DONTWAIT-style); EAGAIN means no packet */
16702 + pkt = skb_recv_datagram(trans->socket->sk,0,1,&ret);
16704 + if (ret==-EAGAIN) {
16705 + _leave(" EAGAIN");
16709 + /* an icmp error may have occurred */
/* requeue ourselves so the error-queue gets drained on the next pass */
16710 + rxrpc_krxiod_queue_transport(trans);
16711 + _leave(" error %d\n",ret);
16715 + /* we'll probably need to checksum it (didn't call sock_recvmsg) */
16716 + if (pkt->ip_summed != CHECKSUM_UNNECESSARY) {
16717 + if ((unsigned short)csum_fold(skb_checksum(pkt,0,pkt->len,pkt->csum))) {
16719 + rxrpc_krxiod_queue_transport(trans);
16720 + _leave(" CSUM failed");
16725 + addr = pkt->nh.iph->saddr;
16726 + port = pkt->h.uh->source;
16728 + _net("Rx Received UDP packet from %08x:%04hu",ntohl(addr),ntohs(port));
16730 + /* unmarshall the Rx parameters and split jumbo packets */
16731 + ret = rxrpc_incoming_msg(trans,pkt,&msgq);
16734 + rxrpc_krxiod_queue_transport(trans);
16735 + _leave(" bad packet");
/* rxrpc_incoming_msg() succeeded, so the queue must be non-empty */
16739 + if (list_empty(&msgq)) BUG();
16741 + msg = list_entry(msgq.next,struct rxrpc_message,link);
16743 + /* locate the record for the peer from which it originated */
16744 + ret = rxrpc_peer_lookup(trans,addr,&peer);
16746 + kdebug("Rx No connections from that peer");
/* no peer record: reject the call with an immediate wire-level abort */
16747 + rxrpc_trans_immediate_abort(trans,msg,-EINVAL);
16748 + goto finished_msg;
16751 + /* try and find a matching connection */
16752 + ret = rxrpc_connection_lookup(peer,msg,&msg->conn);
16754 + kdebug("Rx Unknown Connection");
16755 + rxrpc_trans_immediate_abort(trans,msg,-EINVAL);
16756 + rxrpc_put_peer(peer);
16757 + goto finished_msg;
/* connection lookup took its own references; peer ref no longer needed */
16759 + rxrpc_put_peer(peer);
16761 + /* deal with the first packet of a new call */
/* client-initiated DATA packet with sequence number 1 opens a server call */
16762 + if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
16763 + msg->hdr.type==RXRPC_PACKET_TYPE_DATA &&
16764 + ntohl(msg->hdr.seq)==1
16766 + _debug("Rx New server call");
16767 + rxrpc_trans_receive_new_call(trans,&msgq);
16768 + goto finished_msg;
16771 + /* deal with subsequent packet(s) of call */
16772 + _debug("Rx Call packet");
16773 + while (!list_empty(&msgq)) {
16774 + msg = list_entry(msgq.next,struct rxrpc_message,link);
16775 + list_del_init(&msg->link);
16777 + ret = rxrpc_conn_receive_call_packet(msg->conn,NULL,msg);
16779 + rxrpc_trans_immediate_abort(trans,msg,ret);
16780 + rxrpc_put_message(msg);
16781 + goto finished_msg;
16784 + rxrpc_put_message(msg);
16787 + goto finished_msg;
16789 + /* dispose of the packets */
/* finished_msg (label elided): release any messages still queued */
16791 + while (!list_empty(&msgq)) {
16792 + msg = list_entry(msgq.next,struct rxrpc_message,link);
16793 + list_del_init(&msg->link);
16795 + rxrpc_put_message(msg);
16802 +} /* end rxrpc_trans_receive_packet() */
16804 +/*****************************************************************************/
16806 + * accept a new call from a client trying to connect to one of my services
16807 + * - called in process context
/* Hand the first packet of a prospective new call to the krxsecd daemon
 * for security processing; only the head of msgq is consumed here (the
 * caller disposes of any remaining queued messages). */
16809 +static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
16810 + struct list_head *msgq)
16812 + struct rxrpc_message *msg;
16816 + /* only bother with the first packet */
16817 + msg = list_entry(msgq->next,struct rxrpc_message,link);
16818 + list_del_init(&msg->link);
/* krxsecd takes its own reference; drop ours */
16819 + rxrpc_krxsecd_queue_incoming_call(msg);
16820 + rxrpc_put_message(msg);
16825 +} /* end rxrpc_trans_receive_new_call() */
16827 +/*****************************************************************************/
16829 + * perform an immediate abort without connection or call structures
/* Send an ABORT packet straight back to the sender of msg, echoing its
 * routing header (epoch/cid/call), carrying the negated error as the abort
 * code, without needing any connection or call state.  Aborts are never
 * sent in response to an incoming ABORT (avoids abort ping-pong).
 * NOTE(review): several locals (len, ret, _error) and the set_fs restore
 * after sock_sendmsg are on elided diff lines. */
16831 +int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
16832 + struct rxrpc_message *msg,
16835 + struct rxrpc_header ahdr;
16836 + struct sockaddr_in sin;
16837 + struct msghdr msghdr;
16838 + struct iovec iov[2];
16839 + mm_segment_t oldfs;
16843 + _enter("%p,%p,%d",trans,msg,error);
16845 + /* don't abort an abort packet */
16846 + if (msg->hdr.type==RXRPC_PACKET_TYPE_ABORT) {
/* abort code on the wire is the positive errno in network byte order */
16851 + _error = htonl(-error);
16853 + /* set up the message to be transmitted */
16854 + memcpy(&ahdr,&msg->hdr,sizeof(ahdr));
16855 + ahdr.epoch = msg->hdr.epoch;
16856 + ahdr.serial = htonl(1);
16858 + ahdr.type = RXRPC_PACKET_TYPE_ABORT;
/* flip the CLIENT_INITIATED bit so the abort flows the opposite way */
16859 + ahdr.flags = RXRPC_LAST_PACKET | (~msg->hdr.flags & RXRPC_CLIENT_INITIATED);
16861 + iov[0].iov_len = sizeof(ahdr);
16862 + iov[0].iov_base = &ahdr;
16863 + iov[1].iov_len = sizeof(_error);
16864 + iov[1].iov_base = &_error;
16866 + len = sizeof(ahdr) + sizeof(_error);
/* reply to the source address/port dug out of the offending packet */
16868 + memset(&sin,0,sizeof(sin));
16869 + sin.sin_family = AF_INET;
16870 + sin.sin_port = msg->pkt->h.uh->source;
16871 + sin.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
16873 + msghdr.msg_name = &sin;
16874 + msghdr.msg_namelen = sizeof(sin);
16875 + msghdr.msg_iov = iov;
16876 + msghdr.msg_iovlen = 2;
16877 + msghdr.msg_control = NULL;
16878 + msghdr.msg_controllen = 0;
16879 + msghdr.msg_flags = MSG_DONTWAIT;
16881 + _net("Sending message type %d of %d bytes to %08x:%d",
16884 + htonl(sin.sin_addr.s_addr),
16885 + htons(sin.sin_port));
16887 + /* send the message */
/* kernel-space iovecs: widen the address-space check around sendmsg */
16888 + oldfs = get_fs();
16889 + set_fs(KERNEL_DS);
16890 + ret = sock_sendmsg(trans->socket,&msghdr,len);
16893 + _leave(" = %d",ret);
16895 +} /* end rxrpc_trans_immediate_abort() */
16897 +/*****************************************************************************/
16899 + * receive an ICMP error report and percolate it to all connections heading to the affected
/* Drain one entry from the socket's MSG_ERRQUEUE (as enabled by the
 * IP_RECVERR sockopt set at transport creation), translate the extended
 * error (ICMP type/code or local/other origin) into an errno, then collect
 * every active connection to the affected peer (optionally filtered by
 * port) onto a temporary list and deliver the error to each via
 * rxrpc_conn_handle_error().
 * NOTE(review): locals (err, port, local, ...), break statements in the
 * switches, and several early-return labels are on elided diff lines, so
 * switch fall-through behaviour cannot be confirmed from this hunk alone. */
16902 +static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
16904 + struct rxrpc_connection *conn;
16905 + struct sockaddr_in sin;
16906 + struct rxrpc_peer *peer;
16907 + struct list_head connq, *_p;
16908 + struct errormsg emsg;
16909 + struct msghdr msg;
16910 + mm_segment_t oldfs;
16914 + _enter("%p",trans);
/* clear the latch set by rxrpc_error_report() before draining */
16917 + trans->error_rcvd = 0;
16919 + /* try and receive an error message */
16920 + msg.msg_name = &sin;
16921 + msg.msg_namelen = sizeof(sin);
16922 + msg.msg_iov = NULL;
16923 + msg.msg_iovlen = 0;
16924 + msg.msg_control = &emsg;
16925 + msg.msg_controllen = sizeof(emsg);
16926 + msg.msg_flags = 0;
16928 + oldfs = get_fs();
16929 + set_fs(KERNEL_DS);
16930 + err = sock_recvmsg(trans->socket,&msg,0,MSG_ERRQUEUE|MSG_DONTWAIT|MSG_TRUNC);
16933 + if (err==-EAGAIN) {
16939 + printk("%s: unable to recv an error report: %d\n",__FUNCTION__,err);
/* recvmsg advanced msg_control; recompute how much cmsg data arrived */
16944 + msg.msg_controllen = (char*)msg.msg_control - (char*)&emsg;
16946 + if (msg.msg_controllen<sizeof(emsg.cmsg) || msg.msg_namelen<sizeof(sin)) {
16947 + printk("%s: short control message (nlen=%u clen=%u fl=%x)\n",
16948 + __FUNCTION__,msg.msg_namelen,msg.msg_controllen,msg.msg_flags);
16952 + _net("Rx Received control message { len=%u level=%u type=%u }",
16953 + emsg.cmsg.cmsg_len,emsg.cmsg.cmsg_level,emsg.cmsg.cmsg_type);
16955 + if (sin.sin_family!=AF_INET) {
16956 + printk("Rx Ignoring error report with non-INET address (fam=%u)",
16961 + _net("Rx Received message pertaining to host addr=%x port=%hu",
16962 + ntohl(sin.sin_addr.s_addr),ntohs(sin.sin_port));
16964 + if (emsg.cmsg.cmsg_level!=SOL_IP || emsg.cmsg.cmsg_type!=IP_RECVERR) {
16965 + printk("Rx Ignoring unknown error report { level=%u type=%u }",
16966 + emsg.cmsg.cmsg_level,emsg.cmsg.cmsg_type);
16970 + if (msg.msg_controllen<sizeof(emsg.cmsg)+sizeof(emsg.ee)) {
16971 + printk("%s: short error message (%u)\n",__FUNCTION__,msg.msg_controllen);
16976 + port = sin.sin_port;
/* map the extended-error origin/type/code onto a local errno value */
16978 + switch (emsg.ee.ee_origin) {
16979 + case SO_EE_ORIGIN_ICMP:
16981 + switch (emsg.ee.ee_type) {
16982 + case ICMP_DEST_UNREACH:
16983 + switch (emsg.ee.ee_code) {
16984 + case ICMP_NET_UNREACH:
16985 + _net("Rx Received ICMP Network Unreachable");
16987 + err = -ENETUNREACH;
16989 + case ICMP_HOST_UNREACH:
16990 + _net("Rx Received ICMP Host Unreachable");
16992 + err = -EHOSTUNREACH;
16994 + case ICMP_PORT_UNREACH:
16995 + _net("Rx Received ICMP Port Unreachable");
16996 + err = -ECONNREFUSED;
16998 + case ICMP_NET_UNKNOWN:
16999 + _net("Rx Received ICMP Unknown Network");
17001 + err = -ENETUNREACH;
17003 + case ICMP_HOST_UNKNOWN:
17004 + _net("Rx Received ICMP Unknown Host");
17006 + err = -EHOSTUNREACH;
17009 + _net("Rx Received ICMP DestUnreach { code=%u }",
17010 + emsg.ee.ee_code);
17011 + err = emsg.ee.ee_errno;
17016 + case ICMP_TIME_EXCEEDED:
17017 + _net("Rx Received ICMP TTL Exceeded");
17018 + err = emsg.ee.ee_errno;
17022 + _proto("Rx Received ICMP error { type=%u code=%u }",
17023 + emsg.ee.ee_type,emsg.ee.ee_code);
17024 + err = emsg.ee.ee_errno;
17029 + case SO_EE_ORIGIN_LOCAL:
17030 + _proto("Rx Received local error { error=%d }",emsg.ee.ee_errno);
17032 + err = emsg.ee.ee_errno;
17035 + case SO_EE_ORIGIN_NONE:
17036 + case SO_EE_ORIGIN_ICMP6:
17038 + _proto("Rx Received error report { orig=%u }",emsg.ee.ee_origin);
17040 + err = emsg.ee.ee_errno;
17044 + /* find all the connections between this transport and the affected destination */
17045 + INIT_LIST_HEAD(&connq);
17047 + if (rxrpc_peer_lookup(trans,sin.sin_addr.s_addr,&peer)==0) {
/* snapshot matching connections under the read lock, pinning each with a
 * reference, so the error can be delivered after the lock is dropped */
17048 + read_lock(&peer->conn_lock);
17049 + list_for_each(_p,&peer->conn_active) {
17050 + conn = list_entry(_p,struct rxrpc_connection,link);
17051 + if (port && conn->addr.sin_port!=port)
/* already on somebody's error queue — skip to avoid double-queueing */
17053 + if (!list_empty(&conn->err_link))
17056 + rxrpc_get_connection(conn);
17057 + list_add_tail(&conn->err_link,&connq);
17059 + read_unlock(&peer->conn_lock);
17061 + /* service all those connections */
17062 + while (!list_empty(&connq)) {
17063 + conn = list_entry(connq.next,struct rxrpc_connection,err_link);
17064 + list_del(&conn->err_link);
17066 + rxrpc_conn_handle_error(conn,local,err);
17068 + rxrpc_put_connection(conn);
17071 + rxrpc_put_peer(peer);
17077 +} /* end rxrpc_trans_receive_error_report() */