1 --- drbd.orig/drbd/drbd.c Fri May 25 13:13:22 2001
2 +++ drbd/drbd/drbd.c Sat Sep 8 10:04:56 2001
5 rlen = rlen + sprintf(buf + rlen, "\n");
7 - for (i = 0; i < NR_REQUEST; i++) {
8 + for (i = 0; i < QUEUE_NR_REQUESTS; i++) {
9 if (my_all_requests[i].rq_status == RQ_INACTIVE) {
14 del_timer(&mdev->s_timeout);
15 spin_lock_irqsave(&current->sigmask_lock,flags);
16 - if (sigismember(&current->signal, DRBD_SIG)) {
17 - sigdelset(&current->signal, DRBD_SIG);
18 + if (sigismember(&current->pending.signal, DRBD_SIG)) {
19 + sigdelset(&current->pending.signal, DRBD_SIG);
20 recalc_sigpending(current);
21 spin_unlock_irqrestore(&current->sigmask_lock,flags);
25 void drbd_dio_end(struct buffer_head *bh, int uptodate)
27 - struct request *req = bh->b_dev_id;
28 + struct request *req = bh->b_private;
30 // READs are sorted out in drbd_end_req().
31 drbd_end_req(req, RQ_DRBD_WRITTEN, uptodate);
35 bh->b_list = BUF_LOCKED;
37 + bh->b_private = req;
38 bh->b_end_io = drbd_dio_end;
43 del_timer(&accept_timeout);
44 spin_lock_irqsave(&current->sigmask_lock,flags);
45 - if (sigismember(&current->signal, DRBD_SIG)) {
46 - sigdelset(&current->signal, DRBD_SIG);
47 + if (sigismember(&current->pending.signal, DRBD_SIG)) {
48 + sigdelset(&current->pending.signal, DRBD_SIG);
49 recalc_sigpending(current);
50 spin_unlock_irqrestore(&current->sigmask_lock,
55 mark_buffer_uptodate(bh, 0);
56 - mark_buffer_dirty(bh, 1);
57 + mark_buffer_dirty(bh);
59 if (drbd_conf[minor].conf.wire_protocol == DRBD_PROT_B
60 && header.block_id != ID_SYNCER) {
64 if (drbd_conf[minor].conf.wire_protocol == DRBD_PROT_C) {
65 - if (drbd_conf[minor].unacked_cnt >= (NR_REQUEST / 4)) {
66 + if (drbd_conf[minor].unacked_cnt >= (QUEUE_NR_REQUESTS / 4)) {
67 run_task_queue(&tq_disk);
71 thi->t_state = Running;
73 spin_lock_irqsave(&current->sigmask_lock,flags);
74 - if (sigismember(&current->signal, SIGTERM)) {
75 - sigdelset(&current->signal, SIGTERM);
76 + if (sigismember(&current->pending.signal, SIGTERM)) {
77 + sigdelset(&current->pending.signal, SIGTERM);
78 recalc_sigpending(current);
80 spin_unlock_irqrestore(&current->sigmask_lock,flags);