diff -urN --exclude-from=/usr/src/dontdiff linux-2.4.17_original/linux/include/linux/sched.h linux-2.4.17_SEMUNDO/linux/include/linux/sched.h
--- linux-2.4.17_original/linux/include/linux/sched.h Fri Dec 21 09:42:03 2001
+++ linux-2.4.17_SEMUNDO/linux/include/linux/sched.h Mon Jan 21 19:12:35 2002
@@ -381,7 +381,7 @@
 struct tty_struct *tty; /* NULL if no tty */
 unsigned int locks; /* How many file locks are being held */
 /* ipc stuff */
- struct sem_undo *semundo;
+ struct sem_undohd *semundohd;
 struct sem_queue *semsleeping;
 /* CPU-specific state of this task */
 struct thread_struct thread;
diff -urN --exclude-from=/usr/src/dontdiff linux-2.4.17_original/linux/include/linux/sem.h linux-2.4.17_SEMUNDO/linux/include/linux/sem.h
--- linux-2.4.17_original/linux/include/linux/sem.h Thu Nov 22 11:46:18 2001
+++ linux-2.4.17_SEMUNDO/linux/include/linux/sem.h Mon Jan 21 19:12:35 2002
@@ -121,6 +121,18 @@
 short * semadj; /* array of adjustments, one per semaphore */
 };

+/* Each PROCESS (i.e. collection of tasks that are running POSIX style threads)
+ * must share the same semundo list, in order to support POSIX SEMUNDO
+ * semantics for threads. The sem_undohd controls shared access to this
+ * list among all the tasks (threads) in that process.
+ */
+struct sem_undohd {
+ atomic_t refcnt;
+ spinlock_t lock;
+ volatile unsigned long add_count;
+ struct sem_undo *proc_list;
+};
+
 asmlinkage long sys_semget (key_t key, int nsems, int semflg);
 asmlinkage long sys_semop (int semid, struct sembuf *sops, unsigned nsops);
 asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg);
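
The comment above struct sem_undohd is the heart of the patch: SEM_UNDO adjustments are a per-process property in POSIX, but stock 2.4 keeps the undo list per task, so every LinuxThreads thread gets its own. The following standalone userspace sketch (not part of the patch; the values are arbitrary) shows the behaviour being preserved:

    /* Illustrative only: the SEM_UNDO behaviour the patch wants to keep
     * per PROCESS rather than per task. */
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/ipc.h>
    #include <sys/sem.h>

    int main(void)
    {
        struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };
        int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

        if (semid < 0 || semop(semid, &up, 1) < 0) {
            perror("sem");
            return 1;
        }
        printf("value after semop: %d\n", semctl(semid, 0, GETVAL));
        /* At process exit the kernel applies the recorded -1 adjustment,
         * bringing the semaphore back to 0.  With per-task undo lists each
         * thread carries its own adjustments, so a multithreaded process
         * does not get this behaviour as a unit; sharing one sem_undohd
         * across the thread group is what restores it. */
        return 0;
    }

A real program would normally remove the set with semctl(semid, 0, IPC_RMID) once it is done observing the exit-time undo.
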
diff -urN --exclude-from=/usr/src/dontdiff linux-2.4.17_original/linux/ipc/sem.c linux-2.4.17_SEMUNDO/linux/ipc/sem.c
--- linux-2.4.17_original/linux/ipc/sem.c Sun Sep 30 12:26:42 2001
+++ linux-2.4.17_SEMUNDO/linux/ipc/sem.c Mon Jan 21 19:08:05 2002
@@ -788,12 +788,75 @@
 }
 }

-static struct sem_undo* freeundos(struct sem_array *sma, struct sem_undo* un)
+static inline void lock_semundo(void)
+{
+ struct sem_undohd *undohd;
+
+ undohd = current->semundohd;
+ if ((undohd != NULL) && (atomic_read(&undohd->refcnt) != 1))
+ spin_lock(&undohd->lock);
+}
+
+/* This code has an interesting interaction with copy_semundo():
+ * two tasks could have been sharing the semundohd at the time the "first"
+ * one of those tasks acquires the lock in lock_semundo(). If the other
+ * task exits before "the first one" releases the lock (by calling
+ * unlock_semundo), then the spin_unlock would NOT be called. This would
+ * leave the semundohd in a locked state. That is NOT a problem unless
+ * the remaining task later creates a new task that once again shares the
+ * semundohd. Cleanup of this last case is dealt with in copy_semundo() by
+ * having it reinitialize the spin_lock whenever it creates a second
+ * task sharing the semundohd.
+ */
+static inline void unlock_semundo(void)
+{
+ struct sem_undohd *undohd;
+
+ undohd = current->semundohd;
+ if ((undohd != NULL) && (atomic_read(&undohd->refcnt) != 1))
+ spin_unlock(&undohd->lock);
+}
+
+
+/* If the task doesn't already have a semundohd, then allocate one
+ * here. We guarantee there is only one thread using this undo list,
+ * and current is THE ONE
+ *
+ * If this allocation and assignment succeeds, but later
+ * portions of this code fail, there is no need to free the sem_undohd.
+ * Just let it stay associated with the task, and it'll be freed later
+ * at exit time.
+ *
+ * This can block, so callers must hold no locks.
+ */
+static inline int get_undohd(struct sem_undohd **undohdp)
+{
+ struct sem_undohd *undohd;
+ int size;
+
+ undohd = current->semundohd;
+ if (!undohd) {
+ size = sizeof(struct sem_undohd);
+ undohd = (struct sem_undohd *) kmalloc(size, GFP_KERNEL);
+ if (undohd == NULL)
+ return -ENOMEM;
+ memset(undohd, 0, size);
+ /* don't initialize undohd->lock here. It's done
+ * in copy_semundo() instead.
+ */
+ atomic_set(&undohd->refcnt, 1);
+ current->semundohd = undohd;
+ }
+ *undohdp = undohd;
+ return 0;
+}
+
+static struct sem_undo* freeundos(struct sem_undo* un)
 {
 struct sem_undo* u;
 struct sem_undo** up;

- for(up = &current->semundo;(u=*up);up=&u->proc_next) {
+ for(up = &current->semundohd->proc_list;(u=*up);up=&u->proc_next) {
 if(un==u) {
 un=u->proc_next;
 *up=un;
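
lock_semundo()/unlock_semundo() above take the spinlock only while the undo list is actually shared (refcnt != 1); the sole owner skips the lock entirely. A rough userspace analogue of that pattern, with invented names and C11 atomics standing in for atomic_t (purely illustrative, not kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct shared_list {
        atomic_int refcnt;        /* tasks currently sharing the list */
        pthread_mutex_t lock;     /* initialised when sharing begins */
        void *head;
    };

    /* Lock only when another sharer can race with us; a sole owner
     * (refcnt == 1) skips the lock, like lock_semundo(). */
    static void list_lock(struct shared_list *sl)
    {
        if (sl != NULL && atomic_load(&sl->refcnt) != 1)
            pthread_mutex_lock(&sl->lock);
    }

    static void list_unlock(struct shared_list *sl)
    {
        if (sl != NULL && atomic_load(&sl->refcnt) != 1)
            pthread_mutex_unlock(&sl->lock);
    }

    int main(void)
    {
        struct shared_list sl = { .head = NULL };

        atomic_init(&sl.refcnt, 1);
        pthread_mutex_init(&sl.lock, NULL);
        list_lock(&sl);     /* sole owner: no real locking happens */
        list_unlock(&sl);
        return 0;
    }

The comment before unlock_semundo() describes the corner case this optimisation creates: if the other sharer exits between the lock and the unlock, refcnt drops back to 1 and the unlock is skipped, leaving the lock held; copy_semundo() compensates by re-initialising the spinlock whenever sharing starts again.
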
@@ -805,33 +868,87 @@
 return un->proc_next;
 }

-/* returns without sem_lock on error! */
+static inline struct sem_undo *find_undo(int semid)
+{
+ struct sem_undo *un;
+
+ un = NULL;
+ if (current->semundohd != NULL) {
+ un = current->semundohd->proc_list;
+ }
+ while(un != NULL) {
+ if(un->semid==semid)
+ break;
+ if(un->semid==-1)
+ un=freeundos(un);
+ else
+ un=un->proc_next;
+ }
+ return un;
+}
+
+/* returns without sem_lock or the semundo list lock held on error! */
 static int alloc_undo(struct sem_array *sma, struct sem_undo** unp, int semid, int alter)
 {
 int size, nsems, error;
- struct sem_undo *un;
+ struct sem_undo *un, *new_un;
+ struct sem_undohd *unhd;
+ unsigned long saved_add_count;
+

 nsems = sma->sem_nsems;
- size = sizeof(struct sem_undo) + sizeof(short)*nsems;
+ saved_add_count = 0;
+ if (current->semundohd != NULL)
+ saved_add_count = current->semundohd->add_count;
 sem_unlock(semid);
+ unlock_semundo();

+ error = get_undohd(&unhd);
+ if (error)
+ return error;
+
+ size = sizeof(struct sem_undo) + sizeof(short)*nsems;
 un = (struct sem_undo *) kmalloc(size, GFP_KERNEL);
 if (!un)
 return -ENOMEM;

 memset(un, 0, size);
+ lock_semundo();
 error = sem_revalidate(semid, sma, nsems, alter ? S_IWUGO : S_IRUGO);
 if(error) {
+ unlock_semundo();
 kfree(un);
 return error;
 }

- un->semadj = (short *) &un[1];
- un->semid = semid;
- un->proc_next = current->semundo;
- current->semundo = un;
- un->id_next = sma->undo;
- sma->undo = un;
+
+ /* alloc_undo has just
+ * released all locks and reacquired them.
+ * But another thread may have
+ * added the semundo we were looking for
+ * during that time.
+ * So we check for it again, and
+ * only initialize and add the new one
+ * if we don't discover one.
+ */
+ new_un = NULL;
+ if (current->semundohd->add_count != saved_add_count)
+ new_un = find_undo(semid);
+
+ if (new_un != NULL) {
+ if (sma->undo != new_un)
+ BUG();
+ kfree(un);
+ un = new_un;
+ } else {
+ current->semundohd->add_count++;
+ un->semadj = (short *) &un[1];
+ un->semid = semid;
+ un->proc_next = unhd->proc_list;
+ unhd->proc_list = un;
+ un->id_next = sma->undo;
+ sma->undo = un;
+ }
 *unp = un;
 return 0;
 }
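
alloc_undo() has to drop both sem_lock and the semundo list lock before calling kmalloc() with GFP_KERNEL, so another thread may slip in and add the very sem_undo it is about to create. add_count acts as a generation counter: the search is repeated only if an insert actually happened while the locks were dropped. A self-contained sketch of that lookup-or-insert idiom (all names are made up for illustration):

    #include <pthread.h>
    #include <stdlib.h>

    struct entry { int key; struct entry *next; };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long add_count;          /* bumped on every insert */
    static struct entry *table_head;

    static struct entry *find_entry(int key)
    {
        struct entry *e;
        for (e = table_head; e; e = e->next)
            if (e->key == key)
                return e;
        return NULL;
    }

    /* Called with table_lock held; may drop it to allocate, and always
     * returns with it held again. */
    static struct entry *get_entry(int key)
    {
        unsigned long saved = add_count;
        struct entry *e = find_entry(key);
        struct entry *n;

        if (e)
            return e;

        pthread_mutex_unlock(&table_lock);   /* allocation may block */
        n = calloc(1, sizeof(*n));
        pthread_mutex_lock(&table_lock);
        if (!n)
            return NULL;

        /* Someone may have inserted our key while the lock was dropped;
         * only search again if an insert actually happened. */
        if (add_count != saved && (e = find_entry(key)) != NULL) {
            free(n);
            return e;
        }
        n->key = key;
        n->next = table_head;
        table_head = n;
        add_count++;
        return n;
    }

    int main(void)
    {
        pthread_mutex_lock(&table_lock);
        get_entry(42);
        get_entry(42);          /* second call finds the existing entry */
        pthread_mutex_unlock(&table_lock);
        return 0;
    }

As in alloc_undo(), the losing allocation is simply freed and the entry added by the other thread is used instead.
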
@@ -846,6 +963,7 @@
 int undos = 0, decrease = 0, alter = 0;
 struct sem_queue queue;

+
 if (nsops < 1 || semid < 0)
 return -EINVAL;
 if (nsops > sc_semopm)
@@ -859,17 +977,18 @@
 error=-EFAULT;
 goto out_free;
 }
+ lock_semundo();
 sma = sem_lock(semid);
 error=-EINVAL;
 if(sma==NULL)
- goto out_free;
+ goto out_semundo_free;
 error = -EIDRM;
 if (sem_checkid(sma,semid))
- goto out_unlock_free;
+ goto out_unlock_semundo_free;
 error = -EFBIG;
 for (sop = sops; sop < sops + nsops; sop++) {
 if (sop->sem_num >= sma->sem_nsems)
- goto out_unlock_free;
+ goto out_unlock_semundo_free;
 if (sop->sem_flg & SEM_UNDO)
 undos++;
 if (sop->sem_op < 0)
@@ -881,24 +1000,18 @@

 error = -EACCES;
 if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
- goto out_unlock_free;
+ goto out_unlock_semundo_free;
 if (undos) {
 /* Make sure we have an undo structure
 * for this process and this semaphore set.
 */
- un=current->semundo;
- while(un != NULL) {
- if(un->semid==semid)
- break;
- if(un->semid==-1)
- un=freeundos(sma,un);
- else
- un=un->proc_next;
- }
+
+ un = find_undo(semid);
 if (!un) {
 error = alloc_undo(sma,&un,semid,alter);
- if(error)
+ if (error)
 goto out_free;
+
 }
 } else
 un = NULL;
@@ -930,16 +1043,18 @@
 queue.sleeper = current;
 current->state = TASK_INTERRUPTIBLE;
 sem_unlock(semid);
+ unlock_semundo();

 schedule();

+ lock_semundo();
 tmp = sem_lock(semid);
 if(tmp==NULL) {
 if(queue.prev != NULL)
 BUG();
 current->semsleeping = NULL;
 error = -EIDRM;
- goto out_free;
+ goto out_semundo_free;
 }
 /*
 * If queue.status == 1 we where woken up and
@@ -960,7 +1075,7 @@
 break;
 /* Everything done by update_queue */
 current->semsleeping = NULL;
- goto out_unlock_free;
+ goto out_unlock_semundo_free;
 }
 }
 current->semsleeping = NULL;
@@ -968,14 +1083,61 @@
 update:
 if (alter)
 update_queue (sma);
-out_unlock_free:
+out_unlock_semundo_free:
 sem_unlock(semid);
+out_semundo_free:
+ unlock_semundo();
 out_free:
 if(sops != fast_sops)
 kfree(sops);
 return error;
 }

+/* For now, assume that if ALL of the CLONE_SEMUNDO flags are set, then
+ * we must be creating a POSIX thread, and we want undo lists
+ * to be shared among all the threads in that thread group.
+ *
+ * See the notes above unlock_semundo() regarding the spin_lock_init()
+ * done in this code: the undohd->lock is initialized here instead of in
+ * get_undohd() for the reasons given in that note.
+ */
+#define CLONE_SEMUNDO (CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND)
+
+int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
+{
+ struct sem_undohd *undohd;
+ int error;
+
+ if (((clone_flags & CLONE_SEMUNDO) == CLONE_SEMUNDO) ||
+ (clone_flags & CLONE_THREAD)) {
+ error = get_undohd(&undohd);
+ if (error)
+ return error;
+ if (atomic_read(&undohd->refcnt) == 1)
+ spin_lock_init(&undohd->lock);
+ atomic_inc(&undohd->refcnt);
+ tsk->semundohd = undohd;
+ } else
+ tsk->semundohd = NULL;
+
+ return 0;
+}
+
+static inline void __exit_semundo(struct task_struct *tsk)
+{
+ struct sem_undohd *unhd;
+
+ unhd = tsk->semundohd;
+ if (atomic_dec_and_test(&unhd->refcnt))
+ kfree(unhd);
+}
+
+void exit_semundo(struct task_struct *tsk)
+{
+ if (tsk->semundohd != NULL)
+ __exit_semundo(tsk);
+}
+
 /*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
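
copy_semundo() decides whether the child shares the parent's semundohd purely from the clone flags: either all of the CLONE_SEMUNDO bits (roughly the combination LinuxThreads passes for a new thread) or CLONE_THREAD triggers sharing, while a plain fork() leaves tsk->semundohd NULL and the child allocates its own lazily in get_undohd(). A userspace sketch of the kind of clone(2) call that would take the sharing path under this patch (stack size and error handling are arbitrary; this is not taken from any particular thread library):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    static int thread_fn(void *arg)
    {
        /* any semop(..., SEM_UNDO) done here would be recorded on the
         * undo list shared with the parent under this patch */
        (void)arg;
        return 0;
    }

    int main(void)
    {
        size_t stack_size = 64 * 1024;                 /* arbitrary */
        char *stack = malloc(stack_size);
        int flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND;
        pid_t pid;

        if (!stack)
            return 1;
        /* the stack grows down on most architectures, so pass its top;
         * SIGCHLD is the child's termination signal, so waitpid() works */
        pid = clone(thread_fn, stack + stack_size, flags | SIGCHLD, NULL);
        if (pid < 0) {
            perror("clone");
            return 1;
        }
        waitpid(pid, NULL, 0);
        free(stack);
        return 0;
    }
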
@@ -993,6 +1155,7 @@
 struct sem_queue *q;
 struct sem_undo *u, *un = NULL, **up, **unp;
 struct sem_array *sma;
+ struct sem_undohd *undohd;
 int nsems, i;

 /* If the current process was sleeping for a semaphore,
@@ -1012,7 +1175,14 @@
 sem_unlock(semid);
 }

- for (up = &current->semundo; (u = *up); *up = u->proc_next, kfree(u)) {
+ undohd = current->semundohd;
+ if ((undohd == NULL) || (atomic_read(&undohd->refcnt) != 1))
+ return;
+
+ /* There's no need to hold the semundo list lock, as current
+ * is the last task exiting for this undo list.
+ */
+ for (up = &undohd->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
 int semid = u->semid;
 if(semid == -1)
 continue;
@@ -1050,7 +1220,7 @@
 next_entry:
 sem_unlock(semid);
 }
- current->semundo = NULL;
+ __exit_semundo(current);
 }

 #ifdef CONFIG_PROC_FS
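
sem_exit() (the hunks above) only walks and frees the undo list when current is the last task referencing it, which is why no list lock is taken. The traversal uses a pointer-to-pointer cursor that never advances: each pass re-points the list head past the node just handled and frees it. The same unlink-and-free idiom in standalone form (illustrative names only):

    #include <stdlib.h>

    struct undo { int semid; struct undo *proc_next; };

    /* Consume the whole list: 'up' stays anchored at the head, and the
     * update clause of the for loop re-points *up past the node just
     * handled and frees it, so every iteration sees a fresh head and
     * *head ends up NULL. */
    static void free_all(struct undo **head)
    {
        struct undo *u, **up;

        for (up = head; (u = *up) != NULL; *up = u->proc_next, free(u)) {
            if (u->semid == -1)
                continue;   /* stale entry: skip the work; the update
                               clause still unlinks and frees it */
            /* ...apply u's per-semaphore adjustments here, as sem_exit()
             * does for each live entry... */
        }
    }

    int main(void)
    {
        struct undo *a = calloc(1, sizeof(*a));
        struct undo *b = calloc(1, sizeof(*b));
        struct undo *head;

        if (!a || !b)
            return 1;
        a->semid = 1;
        a->proc_next = b;
        b->semid = -1;              /* stale entry, skipped but still freed */
        head = a;
        free_all(&head);            /* head is NULL afterwards */
        return 0;
    }
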
diff -urN --exclude-from=/usr/src/dontdiff linux-2.4.17_original/linux/ipc/util.c linux-2.4.17_SEMUNDO/linux/ipc/util.c
--- linux-2.4.17_original/linux/ipc/util.c Sun Aug 12 17:37:53 2001
+++ linux-2.4.17_SEMUNDO/linux/ipc/util.c Mon Jan 21 19:08:05 2002
@@ -340,6 +340,17 @@
 * Dummy functions when SYSV IPC isn't configured
 */

+int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
+{
+ return 0;
+}
+
+void exit_semundo(struct task_struct *tsk)
+{
+ return;
+}
+
+
 void sem_exit (void)
 {
 return;
diff -urN --exclude-from=/usr/src/dontdiff linux-2.4.17_original/linux/kernel/fork.c linux-2.4.17_SEMUNDO/linux/kernel/fork.c
--- linux-2.4.17_original/linux/kernel/fork.c Wed Nov 21 10:18:42 2001
+++ linux-2.4.17_SEMUNDO/linux/kernel/fork.c Mon Jan 21 19:08:05 2002
@@ -26,6 +26,9 @@
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>

+extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
+extern void exit_semundo(struct task_struct *tsk);
+
 /* The idle threads do not count.. */
 int nr_threads;
 int nr_running;
@@ -653,8 +656,10 @@

 retval = -ENOMEM;
 /* copy all the process information */
- if (copy_files(clone_flags, p))
+ if (copy_semundo(clone_flags, p))
 goto bad_fork_cleanup;
+ if (copy_files(clone_flags, p))
+ goto bad_fork_cleanup_semundo;
 if (copy_fs(clone_flags, p))
 goto bad_fork_cleanup_files;
 if (copy_sighand(clone_flags, p))
@@ -664,7 +669,6 @@
 retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
 if (retval)
 goto bad_fork_cleanup_mm;
- p->semundo = NULL;

 /* Our parent execution domain becomes current domain
 These must match for thread signalling to apply */
@@ -738,6 +742,8 @@
 exit_fs(p); /* blocking */
 bad_fork_cleanup_files:
 exit_files(p); /* blocking */
+bad_fork_cleanup_semundo:
+ exit_semundo(p);
 bad_fork_cleanup:
 put_exec_domain(p->exec_domain);
 if (p->binfmt && p->binfmt->module)
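
Finally, the fork.c hunks wire copy_semundo() into do_fork() ahead of copy_files() and give it its own unwind label, following do_fork()'s existing convention: every copy_*() that succeeds earns one more bad_fork_cleanup_* label, and a later failure falls through the labels in reverse order of construction. A generic sketch of that error-path ladder (hypothetical setup/teardown names, not the real do_fork() code):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical resources standing in for semundo, files, fs, ... */
    static int setup_a(void) { return 0; }
    static void teardown_a(void) { }
    static int setup_b(void) { return 0; }
    static void teardown_b(void) { }
    static int setup_c(void) { return -1; }   /* fails, to show the unwind */

    static int create_all(void)
    {
        int err = -ENOMEM;

        if (setup_a())
            goto fail;
        if (setup_b())
            goto fail_a;
        if (setup_c())          /* fails here, so b and a are unwound */
            goto fail_b;
        return 0;

        /* unwind in the reverse order of construction, one label per
         * resource, mirroring do_fork()'s bad_fork_cleanup_* chain */
    fail_b:
        teardown_b();
    fail_a:
        teardown_a();
    fail:
        return err;
    }

    int main(void)
    {
        printf("create_all() = %d\n", create_all());
        return 0;
    }
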