diff -urN --exclude-from=/usr/src/dontdiff linux-2.4.17_original/linux/include/linux/sched.h linux-2.4.17_SEMUNDO/linux/include/linux/sched.h
--- linux-2.4.17_original/linux/include/linux/sched.h Fri Dec 21 09:42:03 2001
+++ linux-2.4.17_SEMUNDO/linux/include/linux/sched.h Mon Jan 21 19:12:35 2002
struct tty_struct *tty; /* NULL if no tty */
unsigned int locks; /* How many file locks are being held */
- struct sem_undo *semundo;
+ struct sem_undohd *semundohd;
struct sem_queue *semsleeping;
/* CPU-specific state of this task */
struct thread_struct thread;
diff -urN --exclude-from=/usr/src/dontdiff linux-2.4.17_original/linux/include/linux/sem.h linux-2.4.17_SEMUNDO/linux/include/linux/sem.h
--- linux-2.4.17_original/linux/include/linux/sem.h Thu Nov 22 11:46:18 2001
+++ linux-2.4.17_SEMUNDO/linux/include/linux/sem.h Mon Jan 21 19:12:35 2002
short * semadj; /* array of adjustments, one per semaphore */
+/* Each PROCESS (i.e. a collection of tasks running POSIX-style threads)
+ * must share the same semundo list, in order to support POSIX SEMUNDO
+ * semantics for threads. The sem_undohd controls shared access to this
+ * list among all the tasks (threads) in that process.
+ volatile unsigned long add_count;
+ struct sem_undo *proc_list;
asmlinkage long sys_semget (key_t key, int nsems, int semflg);
asmlinkage long sys_semop (int semid, struct sembuf *sops, unsigned nsops);
asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg);
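
(For reference: the hunk above shows only the new add_count and proc_list members. Pieced together with the refcnt and lock usage in the ipc/sem.c changes below, the structure this patch adds to sem.h would look roughly as follows. This is a sketch reconstructed from the excerpt; the exact field order and the surrounding declarations are an assumption, not verbatim patch text.)

struct sem_undohd {
	atomic_t		refcnt;		/* tasks (threads) sharing this undo list */
	spinlock_t		lock;		/* taken only while refcnt > 1 */
	volatile unsigned long	add_count;	/* bumped each time a sem_undo is added */
	struct sem_undo		*proc_list;	/* head of this process's sem_undo list */
};
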
diff -urN --exclude-from=/usr/src/dontdiff linux-2.4.17_original/linux/ipc/sem.c linux-2.4.17_SEMUNDO/linux/ipc/sem.c
--- linux-2.4.17_original/linux/ipc/sem.c Sun Sep 30 12:26:42 2001
+++ linux-2.4.17_SEMUNDO/linux/ipc/sem.c Mon Jan 21 19:08:05 2002
-static struct sem_undo* freeundos(struct sem_array *sma, struct sem_undo* un)
+static inline void lock_semundo(void)
+ struct sem_undohd *undohd;
+ undohd = current->semundohd;
+ if ((undohd != NULL) && (atomic_read(&undohd->refcnt) != 1))
+ spin_lock(&undohd->lock);
+/* This code has an interesting interaction with copy_semundo():
+ * two tasks could have been sharing the semundohd at the time the "first" one
+ * of those tasks acquires the lock in lock_semundo(). If the other
+ * task exits before "the first one" releases the lock (by calling
+ * unlock_semundo()), then the spin_unlock would NOT be called. This would
+ * leave the semundohd in a locked state. That is NOT a problem unless
+ * the remaining task later creates a new task that once again shares the
+ * semundohd. Cleanup of this last case is handled in copy_semundo() by
+ * having it reinitialize the spin_lock when it once again creates a second
+ * task sharing the semundohd.
+static inline void unlock_semundo(void)
+ struct sem_undohd *undohd;
+ undohd = current->semundohd;
+ if ((undohd != NULL) && (atomic_read(&undohd->refcnt) != 1))
+ spin_unlock(&undohd->lock);
+/* If the task doesn't already have a semundohd, then allocate one
+ * here. We guarantee there is only one thread using this undo list,
+ * and current is THE ONE
+ * If this allocation and assignment succeeds, but later
+ * portions of this code fail, there is no need to free the sem_undohd.
+ * Just let it stay associated with the task, and it'll be freed later
+ * This can block, so callers must hold no locks.
+static inline int get_undohd(struct sem_undohd **undohdp)
+ struct sem_undohd *undohd;
+ undohd = current->semundohd;
+ size = sizeof(struct sem_undohd);
+ undohd = (struct sem_undohd *) kmalloc(size, GFP_KERNEL);
+ memset(undohd, 0, size);
+ /* don't initialize undohd->lock here. It's done
+ * in copy_semundo() instead.
+ atomic_set(&undohd->refcnt, 1);
+ current->semundohd = undohd;
+static struct sem_undo* freeundos(struct sem_undo* un)
struct sem_undo** up;
- for(up = &current->semundo;(u=*up);up=&u->proc_next) {
+ for(up = &current->semundohd->proc_list;(u=*up);up=&u->proc_next) {
@@ -805,33 +868,87 @@
return un->proc_next;
-/* returns without sem_lock on error! */
+static inline struct sem_undo *find_undo(int semid)
+ struct sem_undo *un;
+ if (current->semundohd != NULL) {
+ un = current->semundohd->proc_list;
+ while(un != NULL) {
+ if(un->semid==semid)
+/* returns without sem_lock and semundo list locks on error! */
static int alloc_undo(struct sem_array *sma, struct sem_undo** unp, int semid, int alter)
int size, nsems, error;
- struct sem_undo *un;
+ struct sem_undo *un, *new_un;
+ struct sem_undohd *unhd;
+ unsigned long saved_add_count;
nsems = sma->sem_nsems;
- size = sizeof(struct sem_undo) + sizeof(short)*nsems;
+ saved_add_count = 0;
+ if (current->semundohd != NULL)
+ saved_add_count = current->semundohd->add_count;
+ error = get_undohd(&unhd);
+ size = sizeof(struct sem_undo) + sizeof(short)*nsems;
un = (struct sem_undo *) kmalloc(size, GFP_KERNEL);
error = sem_revalidate(semid, sma, nsems, alter ? S_IWUGO : S_IRUGO);
- un->semadj = (short *) &un[1];
- un->proc_next = current->semundo;
- current->semundo = un;
- un->id_next = sma->undo;
+ /* alloc_undo has just
+ * released all locks and reacquired them.
+ * But another thread may have
+ * added the semundo we were looking for
+ * during that time.
+ * So we check for it again, and
+ * only initialize and add the new one
+ * if we don't discover one.
+ if (current->semundohd->add_count != saved_add_count)
+ new_un = find_undo(semid);
+ if (new_un != NULL) {
+ if (sma->undo != new_un)
+ current->semundohd->add_count++;
+ un->semadj = (short *) &un[1];
+ un->proc_next = unhd->proc_list;
+ unhd->proc_list = un;
+ un->id_next = sma->undo;
int undos = 0, decrease = 0, alter = 0;
struct sem_queue queue;
if (nsops < 1 || semid < 0)
if (nsops > sc_semopm)
@@ -859,17 +977,18 @@
sma = sem_lock(semid);
+ goto out_semundo_free;
if (sem_checkid(sma,semid))
- goto out_unlock_free;
+ goto out_unlock_semundo_free;
for (sop = sops; sop < sops + nsops; sop++) {
if (sop->sem_num >= sma->sem_nsems)
- goto out_unlock_free;
+ goto out_unlock_semundo_free;
if (sop->sem_flg & SEM_UNDO)
@@ -881,24 +1000,18 @@
if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
- goto out_unlock_free;
+ goto out_unlock_semundo_free;
/* Make sure we have an undo structure
* for this process and this semaphore set.
- un=current->semundo;
- while(un != NULL) {
- if(un->semid==semid)
- un=freeundos(sma,un);
+ un = find_undo(semid);
error = alloc_undo(sma,&un,semid,alter);
@@ -930,16 +1043,18 @@
queue.sleeper = current;
current->state = TASK_INTERRUPTIBLE;
tmp = sem_lock(semid);
if(queue.prev != NULL)
current->semsleeping = NULL;
+ goto out_semundo_free;
* If queue.status == 1 we where woken up and
/* Everything done by update_queue */
current->semsleeping = NULL;
- goto out_unlock_free;
+ goto out_unlock_semundo_free;
current->semsleeping = NULL;
@@ -968,14 +1083,61 @@
+out_unlock_semundo_free:
if(sops != fast_sops)
+/* For now, assume that if ALL of the CLONE_SEMUNDO flags are set, then
+ * we must be creating a POSIX thread, and we want undo lists
+ * to be shared among all the threads in that thread group.
+ * See the notes above unlock_semundo() regarding the spin_lock_init()
+ * in this code. The undohd->lock is initialized here rather than in
+ * get_undohd() for the reasons given in that note.
+#define CLONE_SEMUNDO (CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND)
+int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
+ struct sem_undohd *undohd;
+ if (((clone_flags & CLONE_SEMUNDO) == CLONE_SEMUNDO) ||
+ (clone_flags & CLONE_THREAD)) {
+ error = get_undohd(&undohd);
+ if (atomic_read(&undohd->refcnt) == 1)
+ spin_lock_init(&undohd->lock);
+ atomic_inc(&undohd->refcnt);
+ tsk->semundohd = undohd;
+ tsk->semundohd = NULL;
+static inline void __exit_semundo(struct task_struct *tsk)
+ struct sem_undohd *unhd;
+ unhd = tsk->semundohd;
+ if (!atomic_dec_and_test(&unhd->refcnt))
+void exit_semundo(struct task_struct *tsk)
+ if (tsk->semundohd != NULL)
+ __exit_semundo(tsk);
* add semadj values to semaphores, free undo structures.
* undo structures are not freed when semaphore arrays are destroyed
struct sem_undo *u, *un = NULL, **up, **unp;
struct sem_array *sma;
+ struct sem_undohd *undohd;
/* If the current process was sleeping for a semaphore,
@@ -1012,7 +1175,14 @@
- for (up = &current->semundo; (u = *up); *up = u->proc_next, kfree(u)) {
+ undohd = current->semundohd;
+ if ((undohd == NULL) || (atomic_read(&undohd->refcnt) != 1))
+ /* There's no need to hold the semundo list lock, as current
+ * is the last task exiting for this undo list.
+ for (up = &undohd->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
int semid = u->semid;
@@ -1050,7 +1220,7 @@
- current->semundo = NULL;
+ __exit_semundo(current);
#ifdef CONFIG_PROC_FS
diff -urN --exclude-from=/usr/src/dontdiff linux-2.4.17_original/linux/ipc/util.c linux-2.4.17_SEMUNDO/linux/ipc/util.c
--- linux-2.4.17_original/linux/ipc/util.c Sun Aug 12 17:37:53 2001
+++ linux-2.4.17_SEMUNDO/linux/ipc/util.c Mon Jan 21 19:08:05 2002
* Dummy functions when SYSV IPC isn't configured
+int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
+void exit_semundo(struct task_struct *tsk)
diff -urN --exclude-from=/usr/src/dontdiff linux-2.4.17_original/linux/kernel/fork.c linux-2.4.17_SEMUNDO/linux/kernel/fork.c
--- linux-2.4.17_original/linux/kernel/fork.c Wed Nov 21 10:18:42 2001
+++ linux-2.4.17_SEMUNDO/linux/kernel/fork.c Mon Jan 21 19:08:05 2002
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
+extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
+extern void exit_semundo(struct task_struct *tsk);
/* The idle threads do not count.. */
/* copy all the process information */
- if (copy_files(clone_flags, p))
+ if (copy_semundo(clone_flags, p))
goto bad_fork_cleanup;
+ if (copy_files(clone_flags, p))
+ goto bad_fork_cleanup_semundo;
if (copy_fs(clone_flags, p))
goto bad_fork_cleanup_files;
if (copy_sighand(clone_flags, p))
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
goto bad_fork_cleanup_mm;
/* Our parent execution domain becomes current domain
These must match for thread signalling to apply */
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
put_exec_domain(p->exec_domain);
if (p->binfmt && p->binfmt->module)
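
(To illustrate the semantics this patch is after, here is a minimal user-space sketch; it is not part of the patch, and the specific values are arbitrary. Two POSIX threads in one process decrement a semaphore with SEM_UNDO. With a shared per-process semundo list, the kernel keeps a single set of adjustments and applies them once, when the last task of the process exits; with the old per-task lists, each thread's adjustments were applied as that individual thread exited.)

#include <stdio.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun { int val; struct semid_ds *buf; unsigned short *array; };

static int semid;

static void *worker(void *arg)
{
	struct sembuf op = { 0, -1, SEM_UNDO };	/* decrement by one, with undo */

	if (semop(semid, &op, 1) == -1)
		perror("semop");
	return NULL;	/* the thread exits without undoing the operation itself */
}

int main(void)
{
	union semun arg = { .val = 4 };		/* arbitrary initial value */
	pthread_t t1, t2;

	semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	if (semid == -1) {
		perror("semget");
		return 1;
	}
	if (semctl(semid, 0, SETVAL, arg) == -1)
		perror("semctl");

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* With a shared semundo list this prints 2: the undo adjustments are
	 * still pending and are applied only when the whole process exits.
	 * With per-task undo lists the adjustments were already applied as
	 * each thread exited, so this would print 4 again. */
	printf("semaphore value after both threads exited: %d\n",
	       semctl(semid, 0, GETVAL));

	/* The semaphore set is left allocated so the effect of the undo at
	 * process exit can be inspected with ipcs; remove it with ipcrm. */
	return 0;
}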