]> git.pld-linux.org Git - projects/pld-builder.new.git/commitdiff
- reindented to 4 spaces... hope I haven't broken anything
authorMariusz Mazur <mmazur@pld-linux.org>
Thu, 9 Dec 2004 18:09:06 +0000 (18:09 +0000)
committercvs2git <feedback@pld-linux.org>
Sun, 24 Jun 2012 12:13:13 +0000 (12:13 +0000)
Changed files:
    PLD_Builder/acl.py -> 1.23
    PLD_Builder/bqueue.py -> 1.9
    PLD_Builder/build.py -> 1.7
    PLD_Builder/buildlogs.py -> 1.14
    PLD_Builder/chroot.py -> 1.13
    PLD_Builder/config.py -> 1.25
    PLD_Builder/deps.py -> 1.2
    PLD_Builder/file_sender.py -> 1.13
    PLD_Builder/ftp.py -> 1.11
    PLD_Builder/get_br.py -> 1.3
    PLD_Builder/gpg.py -> 1.9
    PLD_Builder/install_br.py -> 1.10
    PLD_Builder/load_balancer.py -> 1.9
    PLD_Builder/lock.py -> 1.4
    PLD_Builder/log.py -> 1.9
    PLD_Builder/loop.py -> 1.2
    PLD_Builder/mailer.py -> 1.17
    PLD_Builder/notify.py -> 1.4
    PLD_Builder/path.py -> 1.12
    PLD_Builder/pipeutil.py -> 1.2
    PLD_Builder/poldek.py -> 1.3
    PLD_Builder/report.py -> 1.27
    PLD_Builder/request.py -> 1.30
    PLD_Builder/request_fetcher.py -> 1.16
    PLD_Builder/request_handler.py -> 1.34
    PLD_Builder/rpm_builder.py -> 1.35
    PLD_Builder/srpm_builder.py -> 1.37
    PLD_Builder/status.py -> 1.4
    PLD_Builder/stopwatch.py -> 1.2
    PLD_Builder/upgrade.py -> 1.5
    PLD_Builder/util.py -> 1.6
    PLD_Builder/wrap.py -> 1.7

32 files changed:
PLD_Builder/acl.py
PLD_Builder/bqueue.py
PLD_Builder/build.py
PLD_Builder/buildlogs.py
PLD_Builder/chroot.py
PLD_Builder/config.py
PLD_Builder/deps.py
PLD_Builder/file_sender.py
PLD_Builder/ftp.py
PLD_Builder/get_br.py
PLD_Builder/gpg.py
PLD_Builder/install_br.py
PLD_Builder/load_balancer.py
PLD_Builder/lock.py
PLD_Builder/log.py
PLD_Builder/loop.py
PLD_Builder/mailer.py
PLD_Builder/notify.py
PLD_Builder/path.py
PLD_Builder/pipeutil.py
PLD_Builder/poldek.py
PLD_Builder/report.py
PLD_Builder/request.py
PLD_Builder/request_fetcher.py
PLD_Builder/request_handler.py
PLD_Builder/rpm_builder.py
PLD_Builder/srpm_builder.py
PLD_Builder/status.py
PLD_Builder/stopwatch.py
PLD_Builder/upgrade.py
PLD_Builder/util.py
PLD_Builder/wrap.py

index 826f4f465bbb930070484b22d3fcee90e39b47d8..3d48cee28bf9f693656ff7c29b27eb4fddb6ee21 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import ConfigParser
 import string
 import fnmatch
@@ -9,121 +11,121 @@ from mailer import Message
 from config import config
 
 class User:
-  def __init__(self, p, login):
-    self.login = login
-    self.privs = []
-    self.gpg_emails = []
-    self.mailto = ""
-    
-    if p.has_option(login, "gpg_emails"):
-      self.gpg_emails = string.split(p.get(login, "gpg_emails"))
-    else:
-      log.panic("acl: [%s] has no gpg_emails" % login)
-      
-    if p.has_option(login, "mailto"):
-      self.mailto = p.get(login, "mailto")
-    else:
-      if len(self.gpg_emails) > 0:
-        self.mailto = self.gpg_emails[0]
-      
-    if p.has_option(login, "privs"):
-      for p in string.split(p.get(login, "privs")):
-        l = string.split(p, ":")
-        if len(l) == 2:
-          p+=":*"
-        if len(l) not in (2,3) or l[0] == "" or l[1] == "":
-          log.panic("acl: invalid priv format: '%s' [%s]" % (p, login))
+    def __init__(self, p, login):
+        self.login = login
+        self.privs = []
+        self.gpg_emails = []
+        self.mailto = ""
+        
+        if p.has_option(login, "gpg_emails"):
+            self.gpg_emails = string.split(p.get(login, "gpg_emails"))
+        else:
+            log.panic("acl: [%s] has no gpg_emails" % login)
+            
+        if p.has_option(login, "mailto"):
+            self.mailto = p.get(login, "mailto")
+        else:
+            if len(self.gpg_emails) > 0:
+                self.mailto = self.gpg_emails[0]
+            
+        if p.has_option(login, "privs"):
+            for p in string.split(p.get(login, "privs")):
+                l = string.split(p, ":")
+                if len(l) == 2:
+                    p+=":*"
+                if len(l) not in (2,3) or l[0] == "" or l[1] == "":
+                    log.panic("acl: invalid priv format: '%s' [%s]" % (p, login))
+                else:
+                    self.privs.append(p)
         else:
-          self.privs.append(p)
-    else:
-      log.panic("acl: [%s] has no privs" % login)
+            log.panic("acl: [%s] has no privs" % login)
 
-  def can_do(self, what, where, branch=None):
-    if branch:
-        action = "%s:%s:%s" % (what, where, branch)
-    else:
-        action = "%s:%s:N-A" % (what, where)
-    for priv in self.privs:
-      if priv[0] == "!":
-        ret = 0
-        priv = priv[1:]
-      else:
-        ret = 1
-      pwhat,pwhere,pbranch=priv.split(":")
-      for pbranch in pbranch.split(","):
-        priv="%s:%s:%s" % (pwhat,pwhere,pbranch)
-        if fnmatch.fnmatch(action, priv):
-          return ret
-    return 0
+    def can_do(self, what, where, branch=None):
+        if branch:
+            action = "%s:%s:%s" % (what, where, branch)
+        else:
+            action = "%s:%s:N-A" % (what, where)
+        for priv in self.privs:
+            if priv[0] == "!":
+                ret = 0
+                priv = priv[1:]
+            else:
+                ret = 1
+            pwhat,pwhere,pbranch=priv.split(":")
+            for pbranch in pbranch.split(","):
+                priv="%s:%s:%s" % (pwhat,pwhere,pbranch)
+                if fnmatch.fnmatch(action, priv):
+                    return ret
+        return 0
 
-  def check_priority(self, prio, where):
-    for priv in self.privs:
-      val,builder=priv.split(":")[0:2]
-      if fnmatch.fnmatch(where, builder):
-       try:
-          val=int(val)
-        except ValueError:
-          continue
-        if prio>=val:
-          return prio
-       else:
-         return val
-    if prio<10:
-      prio=10
-    return prio
+    def check_priority(self, prio, where):
+        for priv in self.privs:
+            val,builder=priv.split(":")[0:2]
+            if fnmatch.fnmatch(where, builder):
+                try:
+                    val=int(val)
+                except ValueError:
+                    continue
+                if prio>=val:
+                    return prio
+                else:
+                    return val
+        if prio<10:
+            prio=10
+        return prio
 
-  def mail_to(self):
-    return self.mailto
+    def mail_to(self):
+        return self.mailto
 
-  def message_to(self):
-    m = Message()
-    m.set_headers(to = self.mail_to(), cc = config.builder_list)
-    return m
+    def message_to(self):
+        m = Message()
+        m.set_headers(to = self.mail_to(), cc = config.builder_list)
+        return m
 
-  def get_login(self):
-    return self.login
+    def get_login(self):
+        return self.login
 
 class ACL_Conf:
-  def __init__(self):
-    self.current_user = None
-    status.push("reading acl.conf")
-    p = ConfigParser.ConfigParser()
-    p.readfp(open(path.acl_conf))
-    self.users = {}
-    for login in p.sections():
-      if self.users.has_key(login):
-        log.panic("acl: duplicate login: %s" % login)
-        continue
-      user = User(p, login)
-      for e in user.gpg_emails:
-        if self.users.has_key(e):
-          log.panic("acl: user email colision %s <-> %s" % \
-                                (self.users[e].login, login))
-        else:
-          self.users[e] = user
-      self.users[login] = user
-    status.pop()
-  
-  def user_by_email(self, ems):
-    for e in ems:
-      if self.users.has_key(e):
-        return self.users[e]
-    return None
+    def __init__(self):
+        self.current_user = None
+        status.push("reading acl.conf")
+        p = ConfigParser.ConfigParser()
+        p.readfp(open(path.acl_conf))
+        self.users = {}
+        for login in p.sections():
+            if self.users.has_key(login):
+                log.panic("acl: duplicate login: %s" % login)
+                continue
+            user = User(p, login)
+            for e in user.gpg_emails:
+                if self.users.has_key(e):
+                    log.panic("acl: user email colision %s <-> %s" % \
+                              (self.users[e].login, login))
+                else:
+                    self.users[e] = user
+            self.users[login] = user
+        status.pop()
+    
+    def user_by_email(self, ems):
+        for e in ems:
+            if self.users.has_key(e):
+                return self.users[e]
+        return None
 
-  def user(self, l):
-    if not self.users.has_key(l):
-      log.panic("no such user: %s" % l)
-    return self.users[l]
+    def user(self, l):
+        if not self.users.has_key(l):
+            log.panic("no such user: %s" % l)
+        return self.users[l]
 
-  def set_current_user(self, u):
-    self.current_user = u
-    if u != None:
-      status.email = u.mail_to()
+    def set_current_user(self, u):
+        self.current_user = u
+        if u != None:
+            status.email = u.mail_to()
 
-  def current_user_login(self):
-    if self.current_user != None:
-      return self.current_user.login
-    else:
-      return ""
+    def current_user_login(self):
+        if self.current_user != None:
+            return self.current_user.login
+        else:
+            return ""
 
 acl = ACL_Conf()
index 12d0c15c0db54f161aabadcc5e5db41d0914a614..e19b16c735f1eb0903d423a15699bd18243d57f3 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import re
 import gzip
 import time
@@ -13,94 +15,94 @@ import util
 import log
 
 class B_Queue:
-  def __init__(self, filename):
-    self.name = filename
-    self.requests = []
-    self.fd = None
-
-  def dump(self, f):
-    self.requests.reverse()
-    for r in self.requests:
-      r.dump(f)
-    self.requests.reverse()
-  
-  def dump_html(self, f):
-    f.write("<html><head><title>PLD builder queue</title></head><body>\n")
-    self.requests.reverse()
-    for r in self.requests:
-      r.dump_html(f)
-    self.requests.reverse()
-    f.write("</body></html>\n")
-  
-  # read possibly compressed, signed queue
-  def read_signed(self):
-    if re.search(r"\.gz$", self.name):
-      f = gzip.open(self.name)
-    else:
-      f = open(self.name)
-    (signers, body) = gpg.verify_sig(f)
-    self.signers = signers
-    self.requests = request.parse_requests(body)
+    def __init__(self, filename):
+        self.name = filename
+        self.requests = []
+        self.fd = None
 
-  def _open(self):
-    if self.fd == None:
-      if os.access(self.name, os.F_OK):
-        self.fd = open(self.name, "r+")
-      else:
-        self.fd = open(self.name, "w+")
+    def dump(self, f):
+        self.requests.reverse()
+        for r in self.requests:
+            r.dump(f)
+        self.requests.reverse()
+    
+    def dump_html(self, f):
+        f.write("<html><head><title>PLD builder queue</title></head><body>\n")
+        self.requests.reverse()
+        for r in self.requests:
+            r.dump_html(f)
+        self.requests.reverse()
+        f.write("</body></html>\n")
     
-  def read(self):
-    self._open()
-    self.signers = []
-    if string.strip(self.fd.read()) == "":
-      # empty file, don't choke
-      self.requests = []
-      return
-    self.fd.seek(0)
-    self.requests = request.parse_requests(self.fd)
+    # read possibly compressed, signed queue
+    def read_signed(self):
+        if re.search(r"\.gz$", self.name):
+            f = gzip.open(self.name)
+        else:
+            f = open(self.name)
+        (signers, body) = gpg.verify_sig(f)
+        self.signers = signers
+        self.requests = request.parse_requests(body)
 
-  def _write_to(self, f):
-    f.write("<queue>\n")
-    for r in self.requests:
-      r.write_to(f)
-    f.write("</queue>\n")
+    def _open(self):
+        if self.fd == None:
+            if os.access(self.name, os.F_OK):
+                self.fd = open(self.name, "r+")
+            else:
+                self.fd = open(self.name, "w+")
+        
+    def read(self):
+        self._open()
+        self.signers = []
+        if string.strip(self.fd.read()) == "":
+            # empty file, don't choke
+            self.requests = []
+            return
+        self.fd.seek(0)
+        self.requests = request.parse_requests(self.fd)
 
-  def write(self):
-    self._open()
-    self.fd.seek(0)
-    self.fd.truncate(0)
-    self._write_to(self.fd)
-    self.fd.flush()
+    def _write_to(self, f):
+        f.write("<queue>\n")
+        for r in self.requests:
+            r.write_to(f)
+        f.write("</queue>\n")
 
-  def lock(self, no_block):
-    self._open()
-    op = fcntl.LOCK_EX
-    if no_block:
-      op = op + fcntl.LOCK_NB
-    try:
-      fcntl.flock(self.fd, op)
-      return 1
-    except IOError:
-      return 0
-  
-  def unlock(self):
-    fcntl.flock(self.fd, fcntl.LOCK_UN)
+    def write(self):
+        self._open()
+        self.fd.seek(0)
+        self.fd.truncate(0)
+        self._write_to(self.fd)
+        self.fd.flush()
+
+    def lock(self, no_block):
+        self._open()
+        op = fcntl.LOCK_EX
+        if no_block:
+            op = op + fcntl.LOCK_NB
+        try:
+            fcntl.flock(self.fd, op)
+            return 1
+        except IOError:
+            return 0
+    
+    def unlock(self):
+        fcntl.flock(self.fd, fcntl.LOCK_UN)
 
-  def write_signed(self, name):
-    sio = StringIO.StringIO()
-    self._write_to(sio)
-    sio.seek(0)
-    sio = gpg.sign(sio)
-    if os.access(name, os.F_OK): os.unlink(name)
-    if re.search(r"\.gz$", name):
-      f = gzip.open(name, "w", 6)
-    else:
-      f = open(name, "w")
-    util.sendfile(sio, f)
-    f.close()
+    def write_signed(self, name):
+        sio = StringIO.StringIO()
+        self._write_to(sio)
+        sio.seek(0)
+        sio = gpg.sign(sio)
+        if os.access(name, os.F_OK): os.unlink(name)
+        if re.search(r"\.gz$", name):
+            f = gzip.open(name, "w", 6)
+        else:
+            f = open(name, "w")
+        util.sendfile(sio, f)
+        f.close()
 
-  def add(self, req):
-    self.requests.append(req)
+    def add(self, req):
+        self.requests.append(req)
 
-  def value(self):
-    return self.requests
+    def value(self):
+        return self.requests
index 948e9b57b1402fc0d37baa7cff9913abed8acb70..0d263393a1fac8bf04f123d5925f1f027475f0f0 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import string
 import os
 import atexit
@@ -15,88 +17,88 @@ from config import config, init_conf
 
 
 def run_command(batch):
-  if "no-chroot" in batch.command_flags:
-    c = "%s >> %s 2>&1" % (batch.command, batch.logfile)
-    f = os.popen(c)
-    for l in f.xreadlines():
-      pass
-    r = f.close()
-    if r == None:
-      return 0
+    if "no-chroot" in batch.command_flags:
+        c = "%s >> %s 2>&1" % (batch.command, batch.logfile)
+        f = os.popen(c)
+        for l in f.xreadlines():
+            pass
+        r = f.close()
+        if r == None:
+            return 0
+        else:
+            return r
     else:
-      return r
-  else:
-    user = "root"
-    if "as-builder" in batch.command_flags:
-      user = "builder"
-    return chroot.run(batch.command, logfile = batch.logfile, user = user)
+        user = "root"
+        if "as-builder" in batch.command_flags:
+            user = "builder"
+        return chroot.run(batch.command, logfile = batch.logfile, user = user)
 
 def build_all(r, build_fnc):
-  status.email = r.requester_email
-  notify.begin(r)
-  tmp = path.spool_dir + util.uuid() + "/"
-  r.tmp_dir = tmp
-  os.mkdir(tmp)
-  atexit.register(util.clean_tmp, tmp)
+    status.email = r.requester_email
+    notify.begin(r)
+    tmp = path.spool_dir + util.uuid() + "/"
+    r.tmp_dir = tmp
+    os.mkdir(tmp)
+    atexit.register(util.clean_tmp, tmp)
 
-  log.notice("started processing %s" % r.id)
-  r.chroot_files = []
-  r.some_ok = 0
-  for batch in r.batches:
-    can_build = 1
-    failed_dep = ""
-    for dep in batch.depends_on:
-      if dep.build_failed:
-        can_build = 0
-        failed_dep = dep.spec
-    
-    if batch.is_command() and can_build:
-      batch.logfile = tmp + "command"
-      if config.builder in batch.builders:
-        log.notice("running %s" % batch.command)
-        stopwatch.start()
-        batch.build_failed = run_command(batch)
-        if batch.build_failed:
-          log.notice("running %s FAILED" % batch.command)
-          notify.add_batch(batch, "FAIL")
+    log.notice("started processing %s" % r.id)
+    r.chroot_files = []
+    r.some_ok = 0
+    for batch in r.batches:
+        can_build = 1
+        failed_dep = ""
+        for dep in batch.depends_on:
+            if dep.build_failed:
+                can_build = 0
+                failed_dep = dep.spec
+        
+        if batch.is_command() and can_build:
+            batch.logfile = tmp + "command"
+            if config.builder in batch.builders:
+                log.notice("running %s" % batch.command)
+                stopwatch.start()
+                batch.build_failed = run_command(batch)
+                if batch.build_failed:
+                    log.notice("running %s FAILED" % batch.command)
+                    notify.add_batch(batch, "FAIL")
+                else:
+                    r.some_ok = 1
+                    log.notice("running %s OK" % batch.command)
+                    notify.add_batch(batch, "OK")
+                batch.build_time = stopwatch.stop()
+                report.add_pld_builder_info(batch)
+                buildlogs.add(batch.logfile, failed = batch.build_failed)
+            else:
+                log.notice("not running command, not for me.")
+                batch.build_failed = 0
+                batch.log_line("queued command %s for other builders" % batch.command)
+                r.some_ok = 1
+                buildlogs.add(batch.logfile, failed = batch.build_failed)
+        elif can_build:
+            log.notice("building %s" % batch.spec)
+            stopwatch.start()
+            batch.logfile = tmp + batch.spec + ".log"
+            batch.gb_id=r.id
+            batch.requester=r.requester
+            batch.requester_email=r.requester_email
+            batch.build_failed = build_fnc(r, batch)
+            if batch.build_failed:
+                log.notice("building %s FAILED" % batch.spec)
+                notify.add_batch(batch, "FAIL")
+            else:
+                r.some_ok = 1
+                log.notice("building %s OK" % batch.spec)
+                notify.add_batch(batch, "OK")
+            batch.build_time = stopwatch.stop()
+            report.add_pld_builder_info(batch)
+            buildlogs.add(batch.logfile, failed = batch.build_failed)
         else:
-          r.some_ok = 1
-          log.notice("running %s OK" % batch.command)
-          notify.add_batch(batch, "OK")
-        batch.build_time = stopwatch.stop()
-        report.add_pld_builder_info(batch)
-        buildlogs.add(batch.logfile, failed = batch.build_failed)
-      else:
-        log.notice("not running command, not for me.")
-        batch.build_failed = 0
-        batch.log_line("queued command %s for other builders" % batch.command)
-        r.some_ok = 1
-        buildlogs.add(batch.logfile, failed = batch.build_failed)
-    elif can_build:
-      log.notice("building %s" % batch.spec)
-      stopwatch.start()
-      batch.logfile = tmp + batch.spec + ".log"
-      batch.gb_id=r.id
-      batch.requester=r.requester
-      batch.requester_email=r.requester_email
-      batch.build_failed = build_fnc(r, batch)
-      if batch.build_failed:
-        log.notice("building %s FAILED" % batch.spec)
-        notify.add_batch(batch, "FAIL")
-      else:
-        r.some_ok = 1
-        log.notice("building %s OK" % batch.spec)
-        notify.add_batch(batch, "OK")
-      batch.build_time = stopwatch.stop()
-      report.add_pld_builder_info(batch)
-      buildlogs.add(batch.logfile, failed = batch.build_failed)
-    else:
-      batch.build_failed = 1
-      batch.skip_reason = "SKIPED [%s failed]" % failed_dep
-      batch.logfile = None
-      batch.build_time = ""
-      log.notice("building %s %s" % (batch.spec, batch.skip_reason))
-      notify.add_batch(batch, "SKIP")
-      
-  buildlogs.flush()
-  chroot.run("rm -f %s" % string.join(r.chroot_files))
+            batch.build_failed = 1
+            batch.skip_reason = "SKIPED [%s failed]" % failed_dep
+            batch.logfile = None
+            batch.build_time = ""
+            log.notice("building %s %s" % (batch.spec, batch.skip_reason))
+            notify.add_batch(batch, "SKIP")
+            
+    buildlogs.flush()
+    chroot.run("rm -f %s" % string.join(r.chroot_files))
index 6c2cbfe1651b31da4f431e591fc2296993e16548..feb348f5db8bbb44d34ce95c010a59e46b4f342d 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import path
 import time
 import os
@@ -7,54 +9,54 @@ from config import config
 import util
 
 class Buildlogs_Queue:
-  def __init__(self):
-    self.queue = []
-    self.some_failed = 0
-
-  def init(self, g):
-    self.requester_email = g.requester_email
-
-  def add(self, logfile, failed):
-    # if /dev/null, don't even bother to store it
-    if config.buildlogs_url == "/dev/null":
-      return
-    name = re.sub(r"\.spec\.log", "", os.path.basename(logfile)) + ".bz2"
-    id = util.uuid()
-    os.system("bzip2 --best --force < %s > %s" \
-                % (logfile, path.buildlogs_queue_dir + id))
-
-    if failed: s = "FAIL"
-    else: s = "OK"
-    f = open(path.buildlogs_queue_dir + id + ".info", "w")
-    f.write("Status: %s\nEND\n" % s)
-    f.close()
-
-    self.queue.append({'name': name, 'id': id, 'failed': failed})
-
-  def flush(self):
-    def desc(l):
-      return """Target: %s/%s
+    def __init__(self):
+        self.queue = []
+        self.some_failed = 0
+
+    def init(self, g):
+        self.requester_email = g.requester_email
+
+    def add(self, logfile, failed):
+        # if /dev/null, don't even bother to store it
+        if config.buildlogs_url == "/dev/null":
+            return
+        name = re.sub(r"\.spec\.log", "", os.path.basename(logfile)) + ".bz2"
+        id = util.uuid()
+        os.system("bzip2 --best --force < %s > %s" \
+                    % (logfile, path.buildlogs_queue_dir + id))
+
+        if failed: s = "FAIL"
+        else: s = "OK"
+        f = open(path.buildlogs_queue_dir + id + ".info", "w")
+        f.write("Status: %s\nEND\n" % s)
+        f.close()
+
+        self.queue.append({'name': name, 'id': id, 'failed': failed})
+
+    def flush(self):
+        def desc(l):
+            return """Target: %s/%s
 Builder: %s
 Time: %d
 Type: buildlog
 Requester: %s
 END
 """ % (config.buildlogs_url, l['name'], config.builder, time.time(), self.requester_email)
-    
-    for l in self.queue:
-      f = open(path.buildlogs_queue_dir + l['id'] + ".desc", "w")
-      f.write(desc(l))
-      f.close()
+        
+        for l in self.queue:
+            f = open(path.buildlogs_queue_dir + l['id'] + ".desc", "w")
+            f.write(desc(l))
+            f.close()
 
 queue = Buildlogs_Queue()
 
 def init(r):
-  queue.init(r)
+    queue.init(r)
 
 def add(logfile, failed):
-  "Add new buildlog with specified status."
-  queue.add(logfile, failed)
+    "Add new buildlog with specified status."
+    queue.add(logfile, failed)
 
 def flush():
-  "Send buildlogs to server."
-  queue.flush()
+    "Send buildlogs to server."
+    queue.flush()
index 3ebdeb76d0aef3ef5849a51ddd5472be4dab5cdd..c683861332f69654a7c4ea650888f9c866fca843 100644 (file)
@@ -1,33 +1,35 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import os
 import re
 from config import config
 
 def quote(cmd):
-  return re.sub("([\"\\\\$`])", r"\\\1", cmd)
-  
+    return re.sub("([\"\\\\$`])", r"\\\1", cmd)
+    
 def command(cmd, user = None):
-  if user == None:
-    user = config.builder_user
-  return "%s sudo chroot %s su - %s -c \"export LC_ALL=C; %s\"" \
-                % (config.sudo_chroot_wrapper, config.chroot, user, quote(cmd))
-  
+    if user == None:
+        user = config.builder_user
+    return "%s sudo chroot %s su - %s -c \"export LC_ALL=C; %s\"" \
+            % (config.sudo_chroot_wrapper, config.chroot, user, quote(cmd))
+    
 def command_sh(cmd):
-  return "%s sudo chroot %s /bin/sh -c \"export LC_ALL=C; %s\"" \
-        % (config.sudo_chroot_wrapper, config.chroot, quote(cmd))
+    return "%s sudo chroot %s /bin/sh -c \"export LC_ALL=C; %s\"" \
+            % (config.sudo_chroot_wrapper, config.chroot, quote(cmd))
 
 def popen(cmd, user = "builder", mode = "r"):
-  f = os.popen(command(cmd, user), mode)
-  return f
-  
+    f = os.popen(command(cmd, user), mode)
+    return f
+    
 def run(cmd, user = "builder", logfile = None):
-  c = command(cmd, user)
-  if logfile != None:
-    c = "%s >> %s 2>&1" % (c, logfile)
-  f = os.popen(c)
-  for l in f.xreadlines():
-    pass
-  r = f.close()
-  if r == None:
-    return 0
-  else:
-    return r
+    c = command(cmd, user)
+    if logfile != None:
+        c = "%s >> %s 2>&1" % (c, logfile)
+    f = os.popen(c)
+    for l in f.xreadlines():
+        pass
+    r = f.close()
+    if r == None:
+        return 0
+    else:
+        return r
index baf0d11e6801f0b17075b04c83790604f06cca4c..4a68aa118f1b04be0223be1173174439b0570a66 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import ConfigParser
 import string
 import os
@@ -9,100 +11,100 @@ import status
 
 
 syslog_facilities = {
-  'kern': syslog.LOG_KERN,
-  'user': syslog.LOG_USER,
-  'mail': syslog.LOG_MAIL,
-  'daemon': syslog.LOG_DAEMON,
-  'auth': syslog.LOG_AUTH,
-  'lpr': syslog.LOG_LPR,
-  'news': syslog.LOG_NEWS,
-  'uucp': syslog.LOG_UUCP,
-  'cron': syslog.LOG_CRON,
-  'local0': syslog.LOG_LOCAL0,
-  'local1': syslog.LOG_LOCAL1,
-  'local2': syslog.LOG_LOCAL2,
-  'local3': syslog.LOG_LOCAL3,
-  'local4': syslog.LOG_LOCAL4,
-  'local5': syslog.LOG_LOCAL5,
-  'local6': syslog.LOG_LOCAL6,
-  'local7': syslog.LOG_LOCAL7
+    'kern': syslog.LOG_KERN,
+    'user': syslog.LOG_USER,
+    'mail': syslog.LOG_MAIL,
+    'daemon': syslog.LOG_DAEMON,
+    'auth': syslog.LOG_AUTH,
+    'lpr': syslog.LOG_LPR,
+    'news': syslog.LOG_NEWS,
+    'uucp': syslog.LOG_UUCP,
+    'cron': syslog.LOG_CRON,
+    'local0': syslog.LOG_LOCAL0,
+    'local1': syslog.LOG_LOCAL1,
+    'local2': syslog.LOG_LOCAL2,
+    'local3': syslog.LOG_LOCAL3,
+    'local4': syslog.LOG_LOCAL4,
+    'local5': syslog.LOG_LOCAL5,
+    'local6': syslog.LOG_LOCAL6,
+    'local7': syslog.LOG_LOCAL7
 }
 
 class Builder_Conf:
-  def __init__(self):
-    self.done = 0
-    pass
+    def __init__(self):
+        self.done = 0
+        pass
 
-  def read(self, builder):
-    p = ConfigParser.ConfigParser()
-    def get(o, d = None):
-      if p.has_option(builder, o):
-        return string.strip(p.get(builder, o))
-      elif p.has_option("all", o):
-        return string.strip(p.get("all", o))
-      elif d != None:
-        return d
-      else:
-        log.panic("cannot find %s::%s" % (builder, o))
-    
-    p.readfp(open(path.builder_conf))
+    def read(self, builder):
+        p = ConfigParser.ConfigParser()
+        def get(o, d = None):
+            if p.has_option(builder, o):
+                return string.strip(p.get(builder, o))
+            elif p.has_option("all", o):
+                return string.strip(p.get("all", o))
+            elif d != None:
+                return d
+            else:
+                log.panic("cannot find %s::%s" % (builder, o))
+        
+        p.readfp(open(path.builder_conf))
 
-    if p.has_option("all", "syslog"):
-      f = p.get("all", "syslog")
-      if f != "":
-        if syslog_facilities.has_key(f):
-          log.open_syslog("builder", syslog_facilities[f])
-        else:
-          log.panic("no such syslog facility: %s" % f)
+        if p.has_option("all", "syslog"):
+            f = p.get("all", "syslog")
+            if f != "":
+                if syslog_facilities.has_key(f):
+                    log.open_syslog("builder", syslog_facilities[f])
+                else:
+                    log.panic("no such syslog facility: %s" % f)
 
-    if builder == "src":
-      builder = get("src_builder", builder)
-    self.builder = builder
+        if builder == "src":
+            builder = get("src_builder", builder)
+        self.builder = builder
 
-    self.binary_builders = string.split(get("binary_builders"))
-    self.tag_prefixes = string.split(get("tag_prefixes", ""))
-    self.bot_email = get("bot_email", "")
-    self.control_url = get("control_url")
-    self.notify_email = get("notify_email")
-    self.admin_email = get("admin_email")
-    self.builder_list = get("builder_list", "")
-    status.admin = self.admin_email
-    status.builder_list = self.builder_list
-    self.email = self.admin_email
+        self.binary_builders = string.split(get("binary_builders"))
+        self.tag_prefixes = string.split(get("tag_prefixes", ""))
+        self.bot_email = get("bot_email", "")
+        self.control_url = get("control_url")
+        self.notify_email = get("notify_email")
+        self.admin_email = get("admin_email")
+        self.builder_list = get("builder_list", "")
+        status.admin = self.admin_email
+        status.builder_list = self.builder_list
+        self.email = self.admin_email
 
-    if builder == "all":
-      return
+        if builder == "all":
+            return
 
-    if builder not in p.sections():
-      log.panic("builder %s not in config file" % builder)
-    self.arch = get("arch")
-    self.chroot = get("chroot")
-    self.email = get("email")
-    self.buildlogs_url = get("buildlogs_url")
-    self.ftp_url = get("ftp_url")
-    self.job_slots = int(get("job_slots"))
-    self.max_load = float(get("max_load"))
-    self.control_url = get("control_url")
-    self.builder_user = get("builder_user", "builder")
-    self.sudo_chroot_wrapper = get("sudo_chroot_wrapper", "")
-    self.nice = get("nice", "0")
-    
-    f = get("syslog", "")
-    if f != "":
-      if syslog_facilities.has_key(f):
-        log.open_syslog(self.builder, syslog_facilities[f])
-      else:
-        log.panic("no such syslog facility: %s" % f)
+        if builder not in p.sections():
+            log.panic("builder %s not in config file" % builder)
+        self.arch = get("arch")
+        self.chroot = get("chroot")
+        self.email = get("email")
+        self.buildlogs_url = get("buildlogs_url")
+        self.ftp_url = get("ftp_url")
+        self.job_slots = int(get("job_slots"))
+        self.max_load = float(get("max_load"))
+        self.control_url = get("control_url")
+        self.builder_user = get("builder_user", "builder")
+        self.sudo_chroot_wrapper = get("sudo_chroot_wrapper", "")
+        self.nice = get("nice", "0")
+        
+        f = get("syslog", "")
+        if f != "":
+            if syslog_facilities.has_key(f):
+                log.open_syslog(self.builder, syslog_facilities[f])
+            else:
+                log.panic("no such syslog facility: %s" % f)
 
-    self.done = 1
+        self.done = 1
 
 config = Builder_Conf()
 
 def init_conf(builder):
-  os.environ['LC_ALL'] = "C"
-  status.push("reading builder config")
-  log.builder = builder
-  if builder == "": builder = "all"
-  config.read(builder)
-  log.builder = config.builder
-  status.pop()
+    os.environ['LC_ALL'] = "C"
+    status.push("reading builder config")
+    log.builder = builder
+    if builder == "": builder = "all"
+    config.read(builder)
+    log.builder = config.builder
+    status.pop()
index 4e3900705519334e9eef4250c061309d67a55110..20d056df2e742e5393faa0a39ed28abfd0a8ed5f 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import string
 from chroot import *
 from util import *
@@ -5,115 +7,115 @@ from util import *
 __all__ = ['compute_deps', 'remove_list']
 
 def compute_deps():
-  """Compute dependenecies between RPM installed on system.
-
-  Return dictionary from name of package to list of packages required by it.
-  Produce some warnings and progress information to stderr.
-  """
-  # pkg-name -> list of stuff returned by rpm -qR
-  rpm_req = {}
-  # --whatprovides ...
-  rpm_prov = {}
-  # list of required files
-  req_files = {}
-  
-  def get_req():
-    msg("rpm-req... ")
-    f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{REQUIRENAME}\n]'")
-    cur_pkg = None
-    while 1:
-      l = f.readline()
-      if l == "": break
-      l = string.strip(l)
-      if l == "@":
-        cur_pkg = string.strip(f.readline())
-        rpm_req[cur_pkg] = []
-       continue
-      rpm_req[cur_pkg].append(l)
-      if l[0] == '/':
-        req_files[l] = 1
-    f.close()
-    msg("done\n")
+    """Compute dependenecies between RPM installed on system.
 
-  def add_provides(pkg, what):
-    if rpm_prov.has_key(what):
-      msg("[%s: %s, %s] " % (what, rpm_prov[what], pkg))
-    else:
-      rpm_prov[what] = pkg
-  
-  def get_prov():
-    msg("rpm-prov... ")
-    f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{PROVIDENAME}\n]'")
-    cur_pkg = None
-    while 1:
-      l = f.readline()
-      if l == "": break
-      l = string.strip(l)
-      if l == "@":
-        cur_pkg = string.strip(f.readline())
-       continue
-      add_provides(cur_pkg, l)
-      if l[0] == '/':
-        # already provided
-        del req_files[l]
-    f.close()
-    msg("done\n")
-  def get_prov_files():
-    msg("rpm-files... ")
-    f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{FILENAMES}\n]'")
-    cur_pkg = None
-    while 1:
-      l = f.readline()
-      if l == "": break
-      l = string.strip(l)
-      if l == "@":
-        cur_pkg = string.strip(f.readline())
-       continue
-      if req_files.has_key(l):
-        add_provides(cur_pkg, l)
-    f.close()
-    msg("done\n")
+    Return dictionary from name of package to list of packages required by it.
+    Produce some warnings and progress information to stderr.
+    """
+    # pkg-name -> list of stuff returned by rpm -qR
+    rpm_req = {}
+    # --whatprovides ...
+    rpm_prov = {}
+    # list of required files
+    req_files = {}
+    
+    def get_req():
+        msg("rpm-req... ")
+        f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{REQUIRENAME}\n]'")
+        cur_pkg = None
+        while 1:
+            l = f.readline()
+            if l == "": break
+            l = string.strip(l)
+            if l == "@":
+                cur_pkg = string.strip(f.readline())
+                rpm_req[cur_pkg] = []
+                continue
+            rpm_req[cur_pkg].append(l)
+            if l[0] == '/':
+                req_files[l] = 1
+        f.close()
+        msg("done\n")
 
-  def compute():
-    msg("computing deps... ")
-    for pkg, reqs in rpm_req.items():
-      pkg_reqs = []
-      for req in reqs:
-        if req[0:7] == "rpmlib(": continue
-        if rpm_prov.has_key(req):
-          if rpm_prov[req] not in pkg_reqs:
-            pkg_reqs.append(rpm_prov[req])
+    def add_provides(pkg, what):
+        if rpm_prov.has_key(what):
+            msg("[%s: %s, %s] " % (what, rpm_prov[what], pkg))
         else:
-          msg("[%s: %s] " % (pkg, req))
-      requires[pkg] = pkg_reqs
-    msg("done\n")
+            rpm_prov[what] = pkg
     
-  # map from pkg-name to list of pkg-names required by it
-  # this is result
-  requires = {}
+    def get_prov():
+        msg("rpm-prov... ")
+        f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{PROVIDENAME}\n]'")
+        cur_pkg = None
+        while 1:
+            l = f.readline()
+            if l == "": break
+            l = string.strip(l)
+            if l == "@":
+                cur_pkg = string.strip(f.readline())
+                continue
+            add_provides(cur_pkg, l)
+            if l[0] == '/':
+                # already provided
+                del req_files[l]
+        f.close()
+        msg("done\n")
+    def get_prov_files():
+        msg("rpm-files... ")
+        f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{FILENAMES}\n]'")
+        cur_pkg = None
+        while 1:
+            l = f.readline()
+            if l == "": break
+            l = string.strip(l)
+            if l == "@":
+                cur_pkg = string.strip(f.readline())
+                continue
+            if req_files.has_key(l):
+                add_provides(cur_pkg, l)
+        f.close()
+        msg("done\n")
+
+    def compute():
+        msg("computing deps... ")
+        for pkg, reqs in rpm_req.items():
+            pkg_reqs = []
+            for req in reqs:
+                if req[0:7] == "rpmlib(": continue
+                if rpm_prov.has_key(req):
+                    if rpm_prov[req] not in pkg_reqs:
+                        pkg_reqs.append(rpm_prov[req])
+                else:
+                    msg("[%s: %s] " % (pkg, req))
+            requires[pkg] = pkg_reqs
+        msg("done\n")
+        
+    # map from pkg-name to list of pkg-names required by it
+    # this is result
+    requires = {}
 
-  get_req()
-  get_prov()
-  get_prov_files()
-  compute()
-  return requires
+    get_req()
+    get_prov()
+    get_prov_files()
+    compute()
+    return requires
 
 def remove_list(req, need):
-  """List of packages scheduled for removal.
-  
-  Given dependency information and list of needed packages compute list
-  of packages that don't need to be present.
-  """
-  need_m = {}
-  def close(n):
-    if need_m.has_key(n): return
-    need_m[n] = 1
-    if not req.has_key(n): return
-    for k in req[n]:
-      close(k)
-  for n in need: close(n)
-  rm = []
-  for p in req.keys():
-    if not need_m.has_key(p): rm.append(p)
-  return rm
+    """List of packages scheduled for removal.
+    
+    Given dependency information and list of needed packages compute list
+    of packages that don't need to be present.
+    """
+    need_m = {}
+    def close(n):
+        if need_m.has_key(n): return
+        need_m[n] = 1
+        if not req.has_key(n): return
+        for k in req[n]:
+            close(k)
+    for n in need: close(n)
+    rm = []
+    for p in req.keys():
+        if not need_m.has_key(p): rm.append(p)
+    return rm
index 17b637a47fbbaee3fd31a9d884eff0131fba7aa5..92651e37d4f8baf21e2973e8e5a680514d27bad5 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import glob
 import re
 import string
@@ -18,155 +20,155 @@ import lock
 retries_times = [5 * 60, 15 * 60, 60 * 60, 2 * 60 * 60, 5 * 60 * 60]
 
 def read_name_val(file):
-  f = open(file)
-  r = {'_file': file[:-5], '_desc': file}
-  rx = re.compile(r"^([^:]+)\s*:(.*)$")
-  for l in f.xreadlines():
-    if l == "END\n":
-      f.close()
-      return r
-    m = rx.search(l)
-    if m:
-      r[m.group(1)] = string.strip(m.group(2))
-    else:
-      break
-  f.close()
-  return None
+    f = open(file)
+    r = {'_file': file[:-5], '_desc': file}
+    rx = re.compile(r"^([^:]+)\s*:(.*)$")
+    for l in f.xreadlines():
+        if l == "END\n":
+            f.close()
+            return r
+        m = rx.search(l)
+        if m:
+            r[m.group(1)] = string.strip(m.group(2))
+        else:
+            break
+    f.close()
+    return None
 
 def scp_file(src, target):
-  global problem
-  f = os.popen("scp -v -B -p %s %s 2>&1 < /dev/null" % (src, target))
-  problem = f.read()
-  return f.close()
+    global problem
+    f = os.popen("scp -v -B -p %s %s 2>&1 < /dev/null" % (src, target))
+    problem = f.read()
+    return f.close()
 
 def copy_file(src, target):
-  try:
-    shutil.copyfile(src, target)
-    return 0
-  except:
-    global problem
-    exctype, value = sys.exc_info()[:2]
-    problem = "cannot copy file: %s" % traceback.format_exception_only(exctype, value)
-    return 1
+    try:
+        shutil.copyfile(src, target)
+        return 0
+    except:
+        global problem
+        exctype, value = sys.exc_info()[:2]
+        problem = "cannot copy file: %s" % traceback.format_exception_only(exctype, value)
+        return 1
 
 def rsync_file(src, target, host):
-  global problem
-  p = open(path.rsync_password_file, "r")
-  password = None
-  for l in p.xreadlines():
-    l = string.split(l)
-    if len(l) >= 2 and l[0] == host:
-      password = l[1]
-  p.close()
-  rsync = "rsync --verbose --archive"
-  if password != None:
-    p = open(".rsync.pass", "w")
-    os.chmod(".rsync.pass", 0600)
-    p.write("%s\n" % password)
+    global problem
+    p = open(path.rsync_password_file, "r")
+    password = None
+    for l in p.xreadlines():
+        l = string.split(l)
+        if len(l) >= 2 and l[0] == host:
+            password = l[1]
     p.close()
-    rsync += " --password-file .rsync.pass"
-  f = os.popen("%s %s %s 2>&1 < /dev/null" % (rsync, src, target))
-  problem = f.read()
-  res = f.close()
-  if password != None: os.unlink(".rsync.pass")
-  return f.close()
-  
+    rsync = "rsync --verbose --archive"
+    if password != None:
+        p = open(".rsync.pass", "w")
+        os.chmod(".rsync.pass", 0600)
+        p.write("%s\n" % password)
+        p.close()
+        rsync += " --password-file .rsync.pass"
+    f = os.popen("%s %s %s 2>&1 < /dev/null" % (rsync, src, target))
+    problem = f.read()
+    res = f.close()
+    if password != None: os.unlink(".rsync.pass")
+    return f.close()
+    
 def send_file(src, target):
-  log.notice("sending %s" % target)
-  m = re.match('rsync://([^/]+)/.*', target)
-  if m:
-    return rsync_file(src, target, host = m.group(1))
-  if target != "" and target[0] == '/':
-    return copy_file(src, target)
-  m = re.match('scp://([^@:]+@[^/:]+)(:|)(.*)', target)
-  if m:
-    return scp_file(src, m.group(1) + ":" + m.group(3))
-  log.alert("unsupported protocol: %s" % target)
-  # pretend everything went OK, so file is removed from queue,
-  # and doesn't cause any additional problems
-  return 0
+    log.notice("sending %s" % target)
+    m = re.match('rsync://([^/]+)/.*', target)
+    if m:
+        return rsync_file(src, target, host = m.group(1))
+    if target != "" and target[0] == '/':
+        return copy_file(src, target)
+    m = re.match('scp://([^@:]+@[^/:]+)(:|)(.*)', target)
+    if m:
+        return scp_file(src, m.group(1) + ":" + m.group(3))
+    log.alert("unsupported protocol: %s" % target)
+    # pretend everything went OK, so file is removed from queue,
+    # and doesn't cause any additional problems
+    return 0
 
 def maybe_flush_queue(dir):
-  retry_delay = 0
-  try:
-    f = open(dir + "retry-at")
-    last_retry = int(string.strip(f.readline()))
-    retry_delay = int(string.strip(f.readline()))
-    f.close()
-    if last_retry + retry_delay > time.time():
-      return
-    os.unlink(dir + "retry-at")
-  except:
-    pass
-    
-  status.push("flushing %s" % dir)
+    retry_delay = 0
+    try:
+        f = open(dir + "retry-at")
+        last_retry = int(string.strip(f.readline()))
+        retry_delay = int(string.strip(f.readline()))
+        f.close()
+        if last_retry + retry_delay > time.time():
+            return
+        os.unlink(dir + "retry-at")
+    except:
+        pass
+        
+    status.push("flushing %s" % dir)
 
-  if flush_queue(dir):
-    f = open(dir + "retry-at", "w")
-    if retry_delay in retries_times:
-      idx = retries_times.index(retry_delay)
-      if idx < len(retries_times) - 1: idx += 1
-    else:
-      idx = 0
-    f.write("%d\n%d\n" % (time.time(), retries_times[idx]))
-    f.close()
+    if flush_queue(dir):
+        f = open(dir + "retry-at", "w")
+        if retry_delay in retries_times:
+            idx = retries_times.index(retry_delay)
+            if idx < len(retries_times) - 1: idx += 1
+        else:
+            idx = 0
+        f.write("%d\n%d\n" % (time.time(), retries_times[idx]))
+        f.close()
 
-  status.pop()
+    status.pop()
 
 def flush_queue(dir):
-  q = []
-  os.chdir(dir)
-  for f in glob.glob(dir + "/*.desc"):
-    d = read_name_val(f)
-    if d != None: q.append(d)
-  def mycmp(x, y):
-    rc = cmp(x['Time'], y['Time'])
-    if (rc == 0):
-      return cmp(x['Type'], y['Type'])
-    else:
-      return rc
-  q.sort(mycmp)
-  
-  error = None
-  remaining = q
-  for d in q:
-    if send_file(d['_file'], d['Target']):
-      error = d
-      break
-    if os.access(d['_file'] + ".info", os.F_OK):
-      if send_file(d['_file'] + ".info", d['Target'] + ".info"):
-        error = d
-        break
-    os.unlink(d['_file'])
-    os.unlink(d['_desc'])
-    remaining = q[1:]
+    q = []
+    os.chdir(dir)
+    for f in glob.glob(dir + "/*.desc"):
+        d = read_name_val(f)
+        if d != None: q.append(d)
+    def mycmp(x, y):
+        rc = cmp(x['Time'], y['Time'])
+        if (rc == 0):
+            return cmp(x['Type'], y['Type'])
+        else:
+            return rc
+    q.sort(mycmp)
     
-  if error != None:
-    emails = {}
-    emails[config.admin_email] = 1
-    for d in remaining:
-      if d.has_key('Requester'):
-        emails[d['Requester']] = 1
-    e = emails.keys()
-    m = mailer.Message()
-    m.set_headers(to = string.join(e, ", "), 
-                  subject = "builder queue problem")
-    m.write("there were problems sending files from queue %s:\n" % dir)
-    m.write("problem: %s\n" % problem)
-    m.send()
-    log.error("error sending files from %s: %s" % (dir, problem))
-    return 1
+    error = None
+    remaining = q
+    for d in q:
+        if send_file(d['_file'], d['Target']):
+            error = d
+            break
+        if os.access(d['_file'] + ".info", os.F_OK):
+            if send_file(d['_file'] + ".info", d['Target'] + ".info"):
+                error = d
+                break
+        os.unlink(d['_file'])
+        os.unlink(d['_desc'])
+        remaining = q[1:]
+        
+    if error != None:
+        emails = {}
+        emails[config.admin_email] = 1
+        for d in remaining:
+            if d.has_key('Requester'):
+                emails[d['Requester']] = 1
+        e = emails.keys()
+        m = mailer.Message()
+        m.set_headers(to = string.join(e, ", "), 
+                      subject = "builder queue problem")
+        m.write("there were problems sending files from queue %s:\n" % dir)
+        m.write("problem: %s\n" % problem)
+        m.send()
+        log.error("error sending files from %s: %s" % (dir, problem))
+        return 1
 
-  return 0
+    return 0
 
 problem = ""
 
 def main():
-  if lock.lock("sending-files", non_block = 1) == None:
-    return
-  init_conf("")
-  maybe_flush_queue(path.buildlogs_queue_dir)
-  maybe_flush_queue(path.ftp_queue_dir)
+    if lock.lock("sending-files", non_block = 1) == None:
+        return
+    init_conf("")
+    maybe_flush_queue(path.buildlogs_queue_dir)
+    maybe_flush_queue(path.ftp_queue_dir)
 
 if __name__ == '__main__':
-  loop.run_loop(main)
+    loop.run_loop(main)
index 01d2afce3f37409df8f0e22349e43e7f5ff6a74b..3bb7a49839f4c0eca9e53f77279482a805b58eb1 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import path
 import os
 import shutil
@@ -7,62 +9,62 @@ from config import config
 import util
 
 class FTP_Queue:
-  def __init__(self):
-    self.queue = None
-    self.some_failed = 0
-    self.status = ""
+    def __init__(self):
+        self.queue = None
+        self.some_failed = 0
+        self.status = ""
 
-  def init(self, g):
-    self.queue = []
-    self.requester_email = g.requester_email
-    self.url = config.ftp_url
-    
-  def add(self, file, type):
-    # if /dev/null, say bye bye
-    if self.url == "/dev/null":
-      return
-    name = os.path.basename(file)
-    id = util.uuid()
-    shutil.copy(file, path.ftp_queue_dir + id)
-    self.queue.append({'name': name, 'id': id, 'type': type})
-    st = os.stat(path.ftp_queue_dir + id)
-    self.status += "%10d %s\n" % (st.st_size, name)
+    def init(self, g):
+        self.queue = []
+        self.requester_email = g.requester_email
+        self.url = config.ftp_url
+        
+    def add(self, file, type):
+        # if /dev/null, say bye bye
+        if self.url == "/dev/null":
+            return
+        name = os.path.basename(file)
+        id = util.uuid()
+        shutil.copy(file, path.ftp_queue_dir + id)
+        self.queue.append({'name': name, 'id': id, 'type': type})
+        st = os.stat(path.ftp_queue_dir + id)
+        self.status += "%10d %s\n" % (st.st_size, name)
 
-  def flush(self):
-    def desc(l):
-      return """Target: %s/%s
+    def flush(self):
+        def desc(l):
+            return """Target: %s/%s
 Builder: %s
 Time: %d
 Type: %s
 Requester: %s
 END
 """ % (self.url, l['name'], config.builder, time.time(), l['type'], self.requester_email)
-    
-    for l in self.queue:
-      f = open(path.ftp_queue_dir + l['id'] + ".desc", "w")
-      f.write(desc(l))
-      f.close()
+        
+        for l in self.queue:
+            f = open(path.ftp_queue_dir + l['id'] + ".desc", "w")
+            f.write(desc(l))
+            f.close()
 
-  def kill(self):
-    for l in self.queue:
-      os.unlink(path.ftp_queue_dir + l)
+    def kill(self):
+        for l in self.queue:
+            os.unlink(path.ftp_queue_dir + l)
 
 queue = FTP_Queue()
 
 def add(f, type="rpm"):
-  queue.add(f, type)
+    queue.add(f, type)
 
 def flush():
-  queue.flush()
-  
+    queue.flush()
+    
 def kill():
-  queue.kill()
+    queue.kill()
 
 def init(r):
-  queue.init(r)
+    queue.init(r)
 
 def status():
-  return queue.status
-  
+    return queue.status
+    
 def clear_status():
-  queue.status = ""
+    queue.status = ""
index c719e5dff4ca709def7170551362f154f114edbf..5f5efe5e942a00652b9cdb4668799a6ff169a604 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import re
 import string
 import xreadlines
@@ -5,97 +7,97 @@ from util import *
 
 
 def get_build_requires(spec, bconds_with, bconds_without):
-  cond_rx = re.compile(r"%\{(\!\?|\?\!|\?)([a-zA-Z0-9_+]+)\s*:([^%\{\}]*)\}")
-  
-  def expand_conds(l):
-    def expand_one(m):
-      if m.group(1) == "?":
-        if macros.has_key(m.group(2)):
-          return m.group(3)
-      else:
-        if not macros.has_key(m.group(2)):
-          return m.group(3)
-      return ""
+    cond_rx = re.compile(r"%\{(\!\?|\?\!|\?)([a-zA-Z0-9_+]+)\s*:([^%\{\}]*)\}")
     
-    for i in range(10):
-      l = cond_rx.sub(expand_one, l)
-      if len(l) > 1000: break
-
-    return l
-
-  macro_rx = re.compile(r"%\{([a-zA-Z0-9_+]+)\}")
-  def expand_macros(l):
-    def expand_one(m):
-      if macros.has_key(m.group(1)):
-        return string.strip(macros[m.group(1)])
-      else:
-        return m.group(0) # don't change
+    def expand_conds(l):
+        def expand_one(m):
+            if m.group(1) == "?":
+                if macros.has_key(m.group(2)):
+                    return m.group(3)
+            else:
+                if not macros.has_key(m.group(2)):
+                    return m.group(3)
+            return ""
         
-    for i in range(10):
-      l = macro_rx.sub(expand_one, l)
-      if len(l) > 1000: break
-      
-    return expand_conds(l)
-  
-  simple_br_rx = re.compile(r"^BuildRequires\s*:\s*([^\s]+)", re.I)
-  bcond_rx = re.compile(r"^%bcond_(with|without)\s+([^\s]+)")
-  version_rx = re.compile(r"^Version\s*:\s*([^\s]+)", re.I)
-  release_rx = re.compile(r"^Release\s*:\s*([^\s]+)", re.I)
-  name_rx = re.compile(r"^Name\s*:\s*([^\s]+)", re.I)
-  define_rx = re.compile(r"^\%define\s+([a-zA-Z0-9_+]+)\s+(.*)", re.I)
-  any_br_rx = re.compile(r"BuildRequires", re.I)
-  
-  macros = {}
-  for b in bconds_with:
-    macros["_with_%s" % b] = 1
-  for b in bconds_without:
-    macros["_without_%s" % b] = 1
+        for i in range(10):
+            l = cond_rx.sub(expand_one, l)
+            if len(l) > 1000: break
 
-  macros["__perl"] = "/usr/bin/perl"
-  macros["_bindir"] = "/usr/bin"
-  macros["_sbindir"] = "/usr/sbin"
-  macros["kgcc_package"] = "gcc"
+        return l
 
-  build_req = []
+    macro_rx = re.compile(r"%\{([a-zA-Z0-9_+]+)\}")
+    def expand_macros(l):
+        def expand_one(m):
+            if macros.has_key(m.group(1)):
+                return string.strip(macros[m.group(1)])
+            else:
+                return m.group(0) # don't change
+                
+        for i in range(10):
+            l = macro_rx.sub(expand_one, l)
+            if len(l) > 1000: break
+            
+        return expand_conds(l)
     
-  f = open(spec)
-  for l in xreadlines.xreadlines(f):
-    l = string.strip(l)
-    if l == "%changelog": break
+    simple_br_rx = re.compile(r"^BuildRequires\s*:\s*([^\s]+)", re.I)
+    bcond_rx = re.compile(r"^%bcond_(with|without)\s+([^\s]+)")
+    version_rx = re.compile(r"^Version\s*:\s*([^\s]+)", re.I)
+    release_rx = re.compile(r"^Release\s*:\s*([^\s]+)", re.I)
+    name_rx = re.compile(r"^Name\s*:\s*([^\s]+)", re.I)
+    define_rx = re.compile(r"^\%define\s+([a-zA-Z0-9_+]+)\s+(.*)", re.I)
+    any_br_rx = re.compile(r"BuildRequires", re.I)
     
-    # %bcond_with..
-    m = bcond_rx.search(l)
-    if m:
-      bcond = m.group(2)
-      if m.group(1) == "with":
-        if macros.has_key("_with_%s" % bcond): 
-          macros["with_%s" % bcond] = 1
-      else:
-        if not macros.has_key("_without_%s" % bcond): 
-          macros["with_%s" % bcond] = 1
-      continue
-  
-    # name,version,release
-    m = version_rx.search(l)
-    if m: macros["version"] = m.group(1)
-    m = release_rx.search(l)
-    if m: macros["release"] = m.group(1)
-    m = name_rx.search(l)
-    if m: macros["name"] = m.group(1)
+    macros = {}
+    for b in bconds_with:
+        macros["_with_%s" % b] = 1
+    for b in bconds_without:
+        macros["_without_%s" % b] = 1
+
+    macros["__perl"] = "/usr/bin/perl"
+    macros["_bindir"] = "/usr/bin"
+    macros["_sbindir"] = "/usr/sbin"
+    macros["kgcc_package"] = "gcc"
 
-    # %define
-    m = define_rx.search(l)
-    if m: macros[m.group(1)] = m.group(2)
+    build_req = []
+        
+    f = open(spec)
+    for l in xreadlines.xreadlines(f):
+        l = string.strip(l)
+        if l == "%changelog": break
+        
+        # %bcond_with..
+        m = bcond_rx.search(l)
+        if m:
+            bcond = m.group(2)
+            if m.group(1) == "with":
+                if macros.has_key("_with_%s" % bcond): 
+                    macros["with_%s" % bcond] = 1
+            else:
+                if not macros.has_key("_without_%s" % bcond): 
+                    macros["with_%s" % bcond] = 1
+            continue
     
-    # *BuildRequires*
-    if any_br_rx.search(l):
-      l = expand_macros(l)
-      m = simple_br_rx.search(l)
-      if m:
-        build_req.append(m.group(1))
-      else:
-        if l <> "" and l[0] <> '#':
-          msg("spec error (%s): %s\n" % (spec, l))
+        # name,version,release
+        m = version_rx.search(l)
+        if m: macros["version"] = m.group(1)
+        m = release_rx.search(l)
+        if m: macros["release"] = m.group(1)
+        m = name_rx.search(l)
+        if m: macros["name"] = m.group(1)
+
+        # %define
+        m = define_rx.search(l)
+        if m: macros[m.group(1)] = m.group(2)
+        
+        # *BuildRequires*
+        if any_br_rx.search(l):
+            l = expand_macros(l)
+            m = simple_br_rx.search(l)
+            if m:
+                build_req.append(m.group(1))
+            else:
+                if l <> "" and l[0] <> '#':
+                    msg("spec error (%s): %s\n" % (spec, l))
 
-  for x in build_req:
-    print x
+    for x in build_req:
+        print x
index 2c4e8e2be455230b14183093ac9c28e8a80fc7f0..d80e4a12a4fe48ad647708194e6826bff703e424 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import popen2
 import re
 import StringIO
@@ -6,25 +8,25 @@ import util
 import pipeutil
 
 def verify_sig(buf):
-  """Check signature.
-  
-  Given email as file-like object, return (signer-emails, signed-body).
-  where signer-emails is lists of strings, and signed-body is StringIO
-  object.
-  """
-  (gpg_out, gpg_in, gpg_err) = popen2.popen3("gpg --batch --no-tty --decrypt")
-  body = pipeutil.rw_pipe(buf, gpg_in, gpg_out)
-  rx = re.compile("^gpg: Good signature from .*<([^>]+)>")
-  emails = []
-  for l in gpg_err.xreadlines():
-    m = rx.match(l)
-    if m:
-      emails.append(m.group(1))
-  gpg_err.close()
-  return (emails, body)
+    """Check signature.
+    
+    Given email as file-like object, return (signer-emails, signed-body).
+    where signer-emails is lists of strings, and signed-body is StringIO
+    object.
+    """
+    (gpg_out, gpg_in, gpg_err) = popen2.popen3("gpg --batch --no-tty --decrypt")
+    body = pipeutil.rw_pipe(buf, gpg_in, gpg_out)
+    rx = re.compile("^gpg: Good signature from .*<([^>]+)>")
+    emails = []
+    for l in gpg_err.xreadlines():
+        m = rx.match(l)
+        if m:
+            emails.append(m.group(1))
+    gpg_err.close()
+    return (emails, body)
 
 def sign(buf):
-  (gpg_out, gpg_in, gpg_err) = popen2.popen3("gpg --batch --no-tty --clearsign")
-  body = pipeutil.rw_pipe(buf, gpg_in, gpg_out)
-  gpg_err.close()
-  return body
+    (gpg_out, gpg_in, gpg_err) = popen2.popen3("gpg --batch --no-tty --clearsign")
+    body = pipeutil.rw_pipe(buf, gpg_in, gpg_out)
+    gpg_err.close()
+    return body
index 44f8b0e21d6ef2cc499360b75ec7280eb6d08078..eb060b8bd6a57259a79a2449a3f45754a53628d9 100644 (file)
@@ -1,31 +1,33 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import re
 import string
 
 import chroot
 
 def install_br(r, b):
-  cmd = "cd rpm/SPECS; TMPDIR=$HOME/%s rpmbuild --nobuild %s %s 2>&1" \
-        % (b.b_id, b.bconds_string(), b.spec)
-  f = chroot.popen(cmd)
-  rx = re.compile(r"^\s*([^\s]+) .*is needed by")
-  needed = {}
-  b.log_line("checking BR")
-  for l in f.xreadlines():
-    b.log_line("rpm: %s" % l)
-    m = rx.search(l)
-    if m: needed[m.group(1)] = 1
-  f.close()
-  if len(needed) == 0:
-    b.log_line("no BR needed")
-    return
-  nbr = ""
-  for bre in needed.keys():
-    nbr = nbr + " " + re.escape(bre)
-  br = string.strip(nbr)
-  b.log_line("installing BR: %s" % br)
-  res = chroot.run("poldek --up; poldek --upa; poldek --unique-pkg-names -v --upgrade %s" % br,
-             user = "root",
-             logfile = b.logfile)
-  if res != 0:
-    b.log_line("error: BR installation failed")
-  return res
+    cmd = "cd rpm/SPECS; TMPDIR=$HOME/%s rpmbuild --nobuild %s %s 2>&1" \
+                % (b.b_id, b.bconds_string(), b.spec)
+    f = chroot.popen(cmd)
+    rx = re.compile(r"^\s*([^\s]+) .*is needed by")
+    needed = {}
+    b.log_line("checking BR")
+    for l in f.xreadlines():
+        b.log_line("rpm: %s" % l)
+        m = rx.search(l)
+        if m: needed[m.group(1)] = 1
+    f.close()
+    if len(needed) == 0:
+        b.log_line("no BR needed")
+        return
+    nbr = ""
+    for bre in needed.keys():
+        nbr = nbr + " " + re.escape(bre)
+    br = string.strip(nbr)
+    b.log_line("installing BR: %s" % br)
+    res = chroot.run("poldek --up; poldek --upa; poldek --unique-pkg-names -v --upgrade %s" % br,
+                         user = "root",
+                         logfile = b.logfile)
+    if res != 0:
+        b.log_line("error: BR installation failed")
+    return res
index 87c7bc22beb03170cebc0b3dddddf400ec1f5340..34f4b0bd4c6aa9b960e8038709918d68e53bbe1c 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import string
 import os
 import time
@@ -17,55 +19,55 @@ from config import config, init_conf
 # it is determined based upon spool/got_lock file, which is also
 # updated to be short
 def builders_order():
-  bs = {}
-  bl = []
-  for b in config.binary_builders:
-    bs[b] = 0
-    bl.append(b)
+    bs = {}
+    bl = []
+    for b in config.binary_builders:
+        bs[b] = 0
+        bl.append(b)
+        
+    lck = lock.lock("got-lock")
+    f = open(path.got_lock_file, "r+")
+    line_no = 0
     
-  lck = lock.lock("got-lock")
-  f = open(path.got_lock_file, "r+")
-  line_no = 0
-  
-  for l in f.xreadlines():
-    line_no += 1
-    b = string.strip(l)
-    if bs.has_key(b):
-      bs[b] = line_no
-    else:
-      log.alert("found strange lock in got-lock: %s" % b)
+    for l in f.xreadlines():
+        line_no += 1
+        b = string.strip(l)
+        if bs.has_key(b):
+            bs[b] = line_no
+        else:
+            log.alert("found strange lock in got-lock: %s" % b)
 
-  def mycmp(b1, b2):
-    return cmp(bs[b1], bs[b2])
-    
-  bl.sort(mycmp)
+    def mycmp(b1, b2):
+        return cmp(bs[b1], bs[b2])
+        
+    bl.sort(mycmp)
 
-  f.seek(0)
-  f.truncate(0)
-  for l in bl: f.write(l + "\n")
-  f.close()
-  lck.close()
+    f.seek(0)
+    f.truncate(0)
+    for l in bl: f.write(l + "\n")
+    f.close()
+    lck.close()
 
-  return bl
+    return bl
 
 def run_rpm_builder(b):
-  if os.fork() == 0:
-    return
-  else:
-    rpm_builder.main_for(b)
-    sys.exit(0)
+    if os.fork() == 0:
+        return
+    else:
+        rpm_builder.main_for(b)
+        sys.exit(0)
 
 def main():
-  init_conf("")
-  for b in builders_order():
-    run_rpm_builder(b)
-    # give builder some time to aquire lock
-    time.sleep(1)
-  # wait for children to die out
-  try:
-    while 1: os.wait()
-  except:
-    pass
+    init_conf("")
+    for b in builders_order():
+        run_rpm_builder(b)
+        # give builder some time to aquire lock
+        time.sleep(1)
+    # wait for children to die out
+    try:
+        while 1: os.wait()
+    except:
+        pass
 
 if __name__ == '__main__':
-  loop.run_loop(main)
+    loop.run_loop(main)
index 1f4653a76d110bf95ba4a621621a0a8d50e7bec2..102df85697be5b5a616fc43a98398ae77fab6afb 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import fcntl
 
 import path
@@ -5,15 +7,15 @@ import path
 locks_list = []
 
 def lock(n, non_block = 0):
-  f = open(path.lock_dir + n, "a")
-  # blah, otherwise it gets garbage collected and doesn't work
-  locks_list.append(f)
-  if non_block:
-    try:
-      fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
-    except:
-      f.close()
-      return None
-  else:
-    fcntl.flock(f, fcntl.LOCK_EX)
-  return f
+    f = open(path.lock_dir + n, "a")
+    # blah, otherwise it gets garbage collected and doesn't work
+    locks_list.append(f)
+    if non_block:
+        try:
+            fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
+        except:
+            f.close()
+            return None
+    else:
+        fcntl.flock(f, fcntl.LOCK_EX)
+    return f
index fda61c94e3bbd04dc8c71d7eca33ac481be63600..db5a15d07ae80f43b5d5f08e7d3da6523996132b 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import sys
 import time
 import syslog
@@ -8,32 +10,32 @@ builder = ""
 do_syslog = 0
 
 def log(p, s):
-  if do_syslog:
-    try:
-      syslog.syslog(p, str(s))
-    except TypeError:
-      syslog.syslog(p, repr(s))
-  f = open(path.log_file, "a")
-  f.write("%s [%s]: %s\n" % (time.asctime(), builder, s))
-  f.close()
-  
+    if do_syslog:
+        try:
+            syslog.syslog(p, str(s))
+        except TypeError:
+            syslog.syslog(p, repr(s))
+    f = open(path.log_file, "a")
+    f.write("%s [%s]: %s\n" % (time.asctime(), builder, s))
+    f.close()
+    
 def panic(s):
-  log(syslog.LOG_ALERT, "PANIC: %s" % s)
-  raise "PANIC: %s" % str(s)
+    log(syslog.LOG_ALERT, "PANIC: %s" % s)
+    raise "PANIC: %s" % str(s)
 
 def alert(s):
-  log(syslog.LOG_ALERT, "alert: %s" % s) 
+    log(syslog.LOG_ALERT, "alert: %s" % s) 
  
 def error(s):
-  log(syslog.LOG_ERR, "error: %s" % s) 
+    log(syslog.LOG_ERR, "error: %s" % s) 
  
 def warn(s):
-  log(syslog.LOG_WARNING, "warning: %s" % s) 
+    log(syslog.LOG_WARNING, "warning: %s" % s) 
  
 def notice(s):
-  log(syslog.LOG_NOTICE, "notice: %s" % s) 
+    log(syslog.LOG_NOTICE, "notice: %s" % s) 
 
 def open_syslog(name, f):
-  global do_syslog
-  do_syslog = 1
-  syslog.openlog(name, syslog.LOG_PID, f)
+    global do_syslog
+    do_syslog = 1
+    syslog.openlog(name, syslog.LOG_PID, f)
index 134b971983b461191697cc81a7f763d679b0736b..5fa4b1e44a207b9aa214e8c430a158f59b047c22 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import os
 import sys
 import time
@@ -5,25 +7,25 @@ import time
 import wrap
 
 def run_loop(fnc, secs = 5, max = 60):
-  def run():
-    pid = os.fork()
-    if pid == 0:
-      wrap.wrap(fnc)
-      sys.exit(0)
-    else:
-      pid, s = os.waitpid(pid, 0)
-      if os.WIFEXITED(s):
-        s = os.WEXITSTATUS(s)
-        if s != 0:
-          sys.exit(s)
-      else:
-        sys.exit(10)
-      
-  start = time.time()
-  while time.time() - start < max:
-    last = time.time()
-    run()
-    took = time.time() - last
-    if took < secs:
-      time.sleep(secs - took)
+    def run():
+        pid = os.fork()
+        if pid == 0:
+            wrap.wrap(fnc)
+            sys.exit(0)
+        else:
+            pid, s = os.waitpid(pid, 0)
+            if os.WIFEXITED(s):
+                s = os.WEXITSTATUS(s)
+                if s != 0:
+                    sys.exit(s)
+            else:
+                sys.exit(10)
+            
+    start = time.time()
+    while time.time() - start < max:
+        last = time.time()
+        run()
+        took = time.time() - last
+        if took < secs:
+            time.sleep(secs - took)
 
index e54085ec8946871457a09a88ba6f1697705dda66..31584b055e107b640fbaaa00ead392429b0bdce8 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import time
 import os
 import sys
@@ -7,64 +9,64 @@ from config import config
 import util
 
 class Message:
-  def __init__(self):
-    self.headers = {}
-    self.body = StringIO.StringIO()
-    self.set_std_headers()
+    def __init__(self):
+        self.headers = {}
+        self.body = StringIO.StringIO()
+        self.set_std_headers()
 
-  def set_header(self, n, v):
-    self.headers[n] = v
+    def set_header(self, n, v):
+        self.headers[n] = v
 
-  def set_headers(self, to = None, cc = None, subject = None):
-    if to != None:
-      self.set_header("To", to)
-    if cc != None:
-      self.set_header("Cc", cc)
-    if subject != None:
-      self.set_header("Subject", subject)
+    def set_headers(self, to = None, cc = None, subject = None):
+        if to != None:
+            self.set_header("To", to)
+        if cc != None:
+            self.set_header("Cc", cc)
+        if subject != None:
+            self.set_header("Subject", subject)
 
-  def write_line(self, l):
-    self.body.write("%s\n" % l)
+    def write_line(self, l):
+        self.body.write("%s\n" % l)
 
-  def write(self, s):
-    self.body.write(s)
+    def write(self, s):
+        self.body.write(s)
 
-  def append_log(self, log):
-    s = os.stat(log)
-    if s.st_size > 50000:
-      # just head and tail
-      f = open(log)
-      line_cnt = 0
-      for l in f.xreadlines():
-        line_cnt += 1
-      f.seek(0)
-      line = 0
-      for l in f.xreadlines():
-        if line < 100 or line > line_cnt - 100:
-          self.body.write(l)
-        if line == line_cnt - 100:
-          self.body.write("\n\n[...]\n\n")
-        line += 1
-    else:
-      util.sendfile(open(log), self.body)
+    def append_log(self, log):
+        s = os.stat(log)
+        if s.st_size > 50000:
+            # just head and tail
+            f = open(log)
+            line_cnt = 0
+            for l in f.xreadlines():
+                line_cnt += 1
+            f.seek(0)
+            line = 0
+            for l in f.xreadlines():
+                if line < 100 or line > line_cnt - 100:
+                    self.body.write(l)
+                if line == line_cnt - 100:
+                    self.body.write("\n\n[...]\n\n")
+                line += 1
+        else:
+            util.sendfile(open(log), self.body)
 
-  def set_std_headers(self):
-    self.headers["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
-    self.headers["Message-ID"] = "<pld-builder.%f.%d@%s>" \
-        % (time.time(), os.getpid(), os.uname()[1])
-    self.headers["From"] = "PLD %s builder <%s>" \
-        % (config.builder, config.email)
-    self.headers["X-PLD-Builder"] = config.builder
+    def set_std_headers(self):
+        self.headers["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
+        self.headers["Message-ID"] = "<pld-builder.%f.%d@%s>" \
+                % (time.time(), os.getpid(), os.uname()[1])
+        self.headers["From"] = "PLD %s builder <%s>" \
+                % (config.builder, config.email)
+        self.headers["X-PLD-Builder"] = config.builder
 
-  def write_to(self, f):
-    for k, v in self.headers.items():
-      f.write("%s: %s\n" % (k, v))
-    f.write("\n")
-    self.body.seek(0)
-    util.sendfile(self.body, f)
+    def write_to(self, f):
+        for k, v in self.headers.items():
+            f.write("%s: %s\n" % (k, v))
+        f.write("\n")
+        self.body.seek(0)
+        util.sendfile(self.body, f)
 
-  def send(self):
-    send_sendmail = "/usr/sbin/sendmail -t -f %s" % config.admin_email
-    f = os.popen(send_sendmail, "w")
-    self.write_to(f)
-    f.close()
+    def send(self):
+        send_sendmail = "/usr/sbin/sendmail -t -f %s" % config.admin_email
+        f = os.popen(send_sendmail, "w")
+        self.write_to(f)
+        f.close()
index 055cd41f4582460f718144492557f717335dc08a..8b71103a04d172b9527c6ea0b5afe9121d9ecc5d 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import StringIO
 
 import mailer
@@ -6,32 +8,32 @@ import util
 from config import config
 
 class Notifier:
-  def __init__(self, g):
-    self.xml = StringIO.StringIO()
-    self.xml.write("<notification group-id='%s' builder='%s'>\n" % \
+    def __init__(self, g):
+        self.xml = StringIO.StringIO()
+        self.xml.write("<notification group-id='%s' builder='%s'>\n" % \
                         (g.id, config.builder))
-  
-  def send(self):
-    self.xml.write("</notification>\n")
-    msg = mailer.Message()
-    msg.set_headers(to = config.notify_email, subject = "status notification")
-    msg.set_header("X-New-PLD-Builder", "status-notification")
-    self.xml.seek(0)
-    util.sendfile(gpg.sign(self.xml), msg)
-    msg.send()
-    self.xml = None
-  
-  def add_batch(self, b, s):
-    self.xml.write("  <batch id='%s' status='%s' />\n" % (b.b_id, s))
-  
+    
+    def send(self):
+        self.xml.write("</notification>\n")
+        msg = mailer.Message()
+        msg.set_headers(to = config.notify_email, subject = "status notification")
+        msg.set_header("X-New-PLD-Builder", "status-notification")
+        self.xml.seek(0)
+        util.sendfile(gpg.sign(self.xml), msg)
+        msg.send()
+        self.xml = None
+    
+    def add_batch(self, b, s):
+        self.xml.write("  <batch id='%s' status='%s' />\n" % (b.b_id, s))
+    
 n = None
 
 def begin(group):
-  global n
-  n = Notifier(group)
+    global n
+    n = Notifier(group)
 
 def add_batch(batch, status):
-  n.add_batch(batch, status)
+    n.add_batch(batch, status)
 
 def send():
-  n.send()
+    n.send()
index 07ecd529df1e9ef1810cd1e0ede67cd96513cc43..a1f30eeaaa3e21c802936d0c15d2c46c9461229c 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import os.path
 
 root_dir = os.path.expanduser('~/pld-builder.new/')
index 93c373c7447428de975b52fc05f3baf18006eb77..e81a752fc8ea69605080b246ec7df309e0cca1e6 100644 (file)
@@ -1,34 +1,36 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import select
 import os
 import StringIO
 
 def rw_pipe(buf_, infd, outfd):
-  buf = StringIO.StringIO()
-  buf.write(buf_.read())
-  ret = StringIO.StringIO()
-  pos = 0
-  rd_fin = 0
-  wr_fin = 0
-  buf.seek(pos)
-  while not (rd_fin and wr_fin):
-    if wr_fin: o = []
-    else: o = [infd]
-    if rd_fin: i = []
-    else: i = [outfd]
-    i, o, e = select.select(i, o, [])
-    if i != []:
-      s = os.read(outfd.fileno(), 1000)
-      if s == "": rd_fin = 1
-      ret.write(s)
-    if o != []:
-      buf.seek(pos)
-      s = buf.read(1000)
-      if s == "":
-        infd.close()
-        wr_fin = 1
-      else:
-        cnt = os.write(infd.fileno(), s)
-        pos += cnt
-  outfd.close()
-  ret.seek(0)
-  return ret
+    buf = StringIO.StringIO()
+    buf.write(buf_.read())
+    ret = StringIO.StringIO()
+    pos = 0
+    rd_fin = 0
+    wr_fin = 0
+    buf.seek(pos)
+    while not (rd_fin and wr_fin):
+        if wr_fin: o = []
+        else: o = [infd]
+        if rd_fin: i = []
+        else: i = [outfd]
+        i, o, e = select.select(i, o, [])
+        if i != []:
+            s = os.read(outfd.fileno(), 1000)
+            if s == "": rd_fin = 1
+            ret.write(s)
+        if o != []:
+            buf.seek(pos)
+            s = buf.read(1000)
+            if s == "":
+                infd.close()
+                wr_fin = 1
+            else:
+                cnt = os.write(infd.fileno(), s)
+                pos += cnt
+    outfd.close()
+    ret.seek(0)
+    return ret
index c4b50ef698c9a9665482f1ddcf34a1af3943e88f..ad9fa8bfc6f29ca416febcf7882cf7cb24d749da 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import re
 import types
 import string
@@ -8,78 +10,78 @@ from util import *
 
 
 def get_poldek_requires():
-  # precompile regexps
-  name_rx = re.compile(r"\d+\. ([^\s]+)-[^-]+-[^-]+\n")
-  req_rx = re.compile(r" req .* --> (.*)\n")
-  pkg_name_rx = re.compile(r"([^\s]+)-[^-]+-[^-]+")
+    # precompile regexps
+    name_rx = re.compile(r"\d+\. ([^\s]+)-[^-]+-[^-]+\n")
+    req_rx = re.compile(r" req .* --> (.*)\n")
+    pkg_name_rx = re.compile(r"([^\s]+)-[^-]+-[^-]+")
 
-  # todo: if a and b are sets, then use sets module
-  # and intersection method on set object
-  def intersect(a, b):
-    r = []
-    for x in a: 
-      if x in b: r.append(x)
-    return r
-  
-  # add given req-list to cur_pkg_reqs
-  def add_req(reqs):
-    if len(reqs) == 1:
-      if reqs[0] not in cur_pkg_reqs:
-        cur_pkg_reqs.append(reqs[0])
-    else:
-      did = 0
-      for x in cur_pkg_reqs:
-        if type(x) is types.ListType:
-          i = intersect(x, reqs)
-          if len(i) == 0:
-            continue
-          did = 1
-          idx = cur_pkg_reqs.index(x)
-          if len(i) == 1:
-            if i[0] in cur_pkg_reqs:
-              del cur_pkg_reqs[idx]
-            else:
-              cur_pkg_reqs[idx] = i[0]
-          else:
-            cur_pkg_reqs[idx] = i
+    # todo: if a and b are sets, then use sets module
+    # and intersection method on set object
+    def intersect(a, b):
+        r = []
+        for x in a: 
+            if x in b: r.append(x)
+        return r
+    
+    # add given req-list to cur_pkg_reqs
+    def add_req(reqs):
+        if len(reqs) == 1:
+            if reqs[0] not in cur_pkg_reqs:
+                cur_pkg_reqs.append(reqs[0])
         else:
-          if x in reqs:
-            return
-      if not did:
-        cur_pkg_reqs.append(reqs)
+            did = 0
+            for x in cur_pkg_reqs:
+                if type(x) is types.ListType:
+                    i = intersect(x, reqs)
+                    if len(i) == 0:
+                        continue
+                    did = 1
+                    idx = cur_pkg_reqs.index(x)
+                    if len(i) == 1:
+                        if i[0] in cur_pkg_reqs:
+                            del cur_pkg_reqs[idx]
+                        else:
+                            cur_pkg_reqs[idx] = i[0]
+                    else:
+                        cur_pkg_reqs[idx] = i
+                else:
+                    if x in reqs:
+                        return
+            if not did:
+                cur_pkg_reqs.append(reqs)
+        
+    pkg_reqs = {}
+    cur_pkg_reqs = None
+    cur_pkg = None
     
-  pkg_reqs = {}
-  cur_pkg_reqs = None
-  cur_pkg = None
-  
-  f = chr_popen("poldek -v -v --verify --unique-pkg-names")
-  for l in xreadlines.xreadlines(f):
-    m = name_rx.match(l)
-    if m:
-      if cur_pkg:
-        pkg_reqs[cur_pkg] = cur_pkg_reqs
-      cur_pkg = m.groups(1)
-      if pkg_reqs.has_key(cur_pkg):
-        cur_pkg = None
-        cur_pkg_reqs = None
-      else:
-        cur_pkg_reqs = []
-      continue
-    m = req_rx.match(l)
-    if m:
-      reqs = []
-      for x in string.split(m.group(1)):
-        if x in ["RPMLIB_CAP", "NOT", "FOUND", "UNMATCHED"]: continue
-        m = pkg_name_rx.match(x)
+    f = chr_popen("poldek -v -v --verify --unique-pkg-names")
+    for l in xreadlines.xreadlines(f):
+        m = name_rx.match(l)
         if m:
-          reqs.append(m.group(1))
-        else:
-          msg("poldek_reqs: bad pkg name: %s\n" % x)
-      if len(reqs) != 0: add_req(reqs)
-  
-  f.close()
+            if cur_pkg:
+                pkg_reqs[cur_pkg] = cur_pkg_reqs
+            cur_pkg = m.groups(1)
+            if pkg_reqs.has_key(cur_pkg):
+                cur_pkg = None
+                cur_pkg_reqs = None
+            else:
+                cur_pkg_reqs = []
+            continue
+        m = req_rx.match(l)
+        if m:
+            reqs = []
+            for x in string.split(m.group(1)):
+                if x in ["RPMLIB_CAP", "NOT", "FOUND", "UNMATCHED"]: continue
+                m = pkg_name_rx.match(x)
+                if m:
+                    reqs.append(m.group(1))
+                else:
+                    msg("poldek_reqs: bad pkg name: %s\n" % x)
+            if len(reqs) != 0: add_req(reqs)
+    
+    f.close()
 
-  if cur_pkg:
-    pkg_reqs[cur_pkg] = cur_pkg_reqs
+    if cur_pkg:
+        pkg_reqs[cur_pkg] = cur_pkg_reqs
 
-  return pkg_reqs
+    return pkg_reqs
index 9f816bba3ce78f6bb9a08fdb1955fb309d426be6..0a91a64c28408d9b38a22a7b82f100c0a1cf2ca4 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import string
 import path
 
@@ -7,123 +9,123 @@ import mailer
 from config import config
 
 def unpackaged_files(b):
-  msg = "warning: Installed (but unpackaged) file(s) found:\n"
-  f = open(b.logfile)
-  copy_mode = 0
-  out = []
-  for l in f.xreadlines():
-    if l == msg:
-      copy_mode = 1
-      out.append(l)
-    elif copy_mode:
-      if l[0] != ' ':
-        copy_mode = 0
-      else:
-        out.append(l)
-  return out
+    msg = "warning: Installed (but unpackaged) file(s) found:\n"
+    f = open(b.logfile)
+    copy_mode = 0
+    out = []
+    for l in f.xreadlines():
+        if l == msg:
+            copy_mode = 1
+            out.append(l)
+        elif copy_mode:
+            if l[0] != ' ':
+                copy_mode = 0
+            else:
+                out.append(l)
+    return out
 
 def add_pld_builder_info(b):
-  l = open(b.logfile, "a")
-  l.write("Begin-PLD-Builder-Info\n")
-  l.write("Build-Time: %s\n\n" % b.build_time)
-  st = ftp.status()
-  if st != "":
-    l.write("Files queued for ftp:\n%s\n" % st)
-  ftp.clear_status()
-  l.writelines(unpackaged_files(b))
-  l.write("End-PLD-Builder-Info\n")
+    l = open(b.logfile, "a")
+    l.write("Begin-PLD-Builder-Info\n")
+    l.write("Build-Time: %s\n\n" % b.build_time)
+    st = ftp.status()
+    if st != "":
+        l.write("Files queued for ftp:\n%s\n" % st)
+    ftp.clear_status()
+    l.writelines(unpackaged_files(b))
+    l.write("End-PLD-Builder-Info\n")
 
 def info_from_log(b, target):
-  beg = "Begin-PLD-Builder-Info\n"
-  end = "End-PLD-Builder-Info\n"
-  f = open(b.logfile)
-  copy_mode = 0
-  need_header = 1
-  for l in f.xreadlines():
-    if l == beg:
-      if need_header:
-        need_header = 0
-        target.write("\n--- %s:%s:\n" % (b.spec, b.branch))
-      copy_mode = 1
-    elif copy_mode:
-      if l == end:
-        copy_mode = 0
-      else:
-        target.write(l)
-  
+    beg = "Begin-PLD-Builder-Info\n"
+    end = "End-PLD-Builder-Info\n"
+    f = open(b.logfile)
+    copy_mode = 0
+    need_header = 1
+    for l in f.xreadlines():
+        if l == beg:
+            if need_header:
+                need_header = 0
+                target.write("\n--- %s:%s:\n" % (b.spec, b.branch))
+            copy_mode = 1
+        elif copy_mode:
+            if l == end:
+                copy_mode = 0
+            else:
+                target.write(l)
+    
 def send_report(r, is_src = False):
-  s_failed = ' '.join([b.spec for b in r.batches if b.build_failed])
-  s_ok = ' '.join([b.spec for b in r.batches if not b.build_failed])
-
-  if s_failed: s_failed = "ERRORS: %s" % s_failed
-  if s_ok: s_ok = "OK: %s" % s_ok
-
-  subject = ' '.join((s_failed, s_ok))
-  
-  m = mailer.Message()
-  m.set_headers(to = r.requester_email,
-                cc = config.builder_list,
-                subject = subject[0:100])
-  if is_src:
-    m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
-  else:
-    m.set_header("References", "<%s@pld.src.builder>" % r.id)
-    m.set_header("In-Reply-To", "<%s@pld.src.builder>" % r.id)
-
-  for b in r.batches:
-    if b.build_failed and b.logfile == None:
-      info = b.skip_reason
-    elif b.build_failed: 
-      info = "FAILED"
-    else: 
-      info = "OK"
-    m.write("%s (%s): %s\n" % (b.spec, b.branch, info))
-
-  for b in r.batches:
-    if b.logfile != None:
-      info_from_log(b, m)
-
-  for b in r.batches:
-    if (b.is_command () or b.build_failed) and b.logfile != None:
-      m.write("\n\n*** buildlog for %s\n" % b.spec)
-      m.append_log(b.logfile)
-      m.write("\n\n")
-      
-  m.send()
+    s_failed = ' '.join([b.spec for b in r.batches if b.build_failed])
+    s_ok = ' '.join([b.spec for b in r.batches if not b.build_failed])
 
-def send_cia_report(r, is_src = False):
+    if s_failed: s_failed = "ERRORS: %s" % s_failed
+    if s_ok: s_ok = "OK: %s" % s_ok
 
-  subject = 'DeliverXML'
-  
-  m = mailer.Message()
-  if (len(config.bot_email) == 0):
-    return
-
-  m.set_headers(to = config.bot_email,
-                subject = subject)
-  m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
-  m.set_header("X-mailer", "$Id$")
-  m.set_header("X-builder", "PLD")
-
-  # get header of xml message from file
-  f = open(path.root_dir + 'PLD_Builder/cia-head.xml')
-  m.write(f.read())
-  f.close()
-
-  # write in iteration list and status of all processed files
-  for b in r.batches:
-    # Instead of hardcoded Ac information use some config variable
-    m.write('<package name="%s" arch="%s">\n' % (b.spec, b.branch))
-    if b.build_failed:
-           m.write('<failed/>\n')
+    subject = ' '.join((s_failed, s_ok))
+    
+    m = mailer.Message()
+    m.set_headers(to = r.requester_email,
+                  cc = config.builder_list,
+                  subject = subject[0:100])
+    if is_src:
+        m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
     else:
-           m.write('<success/>\n')
-    m.write('</package>\n')
-
-  # get footer of xml message from file
-  f = open(path.root_dir + 'PLD_Builder/cia-foot.xml')
-  m.write(f.read())
-  f.close()
-           
-  # send the e-mail
-  m.send()
+        m.set_header("References", "<%s@pld.src.builder>" % r.id)
+        m.set_header("In-Reply-To", "<%s@pld.src.builder>" % r.id)
+
+    for b in r.batches:
+        if b.build_failed and b.logfile == None:
+            info = b.skip_reason
+        elif b.build_failed: 
+            info = "FAILED"
+        else: 
+            info = "OK"
+        m.write("%s (%s): %s\n" % (b.spec, b.branch, info))
+
+    for b in r.batches:
+        if b.logfile != None:
+            info_from_log(b, m)
+
+    for b in r.batches:
+        if (b.is_command () or b.build_failed) and b.logfile != None:
+            m.write("\n\n*** buildlog for %s\n" % b.spec)
+            m.append_log(b.logfile)
+            m.write("\n\n")
+            
+    m.send()
+
+def send_cia_report(r, is_src = False):
+
+    subject = 'DeliverXML'
+    
+    m = mailer.Message()
+    if (len(config.bot_email) == 0):
+        return
+
+    m.set_headers(to = config.bot_email,
+                  subject = subject)
+    m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
+    m.set_header("X-mailer", "$Id$")
+    m.set_header("X-builder", "PLD")
+
+    # get header of xml message from file
+    f = open(path.root_dir + 'PLD_Builder/cia-head.xml')
+    m.write(f.read())
+    f.close()
+
+    # write in iteration list and status of all processed files
+    for b in r.batches:
+        # Instead of hardcoded Ac information use some config variable
+        m.write('<package name="%s" arch="%s">\n' % (b.spec, b.branch))
+        if b.build_failed:
+            m.write('<failed/>\n')
+        else:
+            m.write('<success/>\n')
+        m.write('</package>\n')
+
+    # get footer of xml message from file
+    f = open(path.root_dir + 'PLD_Builder/cia-foot.xml')
+    m.write(f.read())
+    f.close()
+                        
+    # send the e-mail
+    m.send()
index db9b9a06011dcdeb78f95d3132192f125ab96203..6bb6172102607ce43bb2872ee326996ef35f8f24 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 from xml.dom.minidom import *
 import string
 import time
@@ -10,312 +12,313 @@ import log
 from acl import acl
 
 __all__ = ['parse_request', 'parse_requests']
-  
+    
 def text(e):
-  res = ""
-  for n in e.childNodes:
-    if n.nodeType != Element.TEXT_NODE:
-      log.panic("xml: text expected in <%s>, got %d" % (e.nodeName, n.nodeType))
-    res += n.nodeValue
-  return res
+    res = ""
+    for n in e.childNodes:
+        if n.nodeType != Element.TEXT_NODE:
+            log.panic("xml: text expected in <%s>, got %d" % (e.nodeName, n.nodeType))
+        res += n.nodeValue
+    return res
 
 def attr(e, a, default = None):
-  try:
-    return e.attributes[a].value
-  except:
-    if default != None:
-      return default
-    raise
+    try:
+        return e.attributes[a].value
+    except:
+        if default != None:
+            return default
+        raise
 
 def escape(s):
-  return xml.sax.saxutils.escape(s)
+    return xml.sax.saxutils.escape(s)
 
 def is_blank(e):
-  return e.nodeType == Element.TEXT_NODE and string.strip(e.nodeValue) == ""
-  
+    return e.nodeType == Element.TEXT_NODE and string.strip(e.nodeValue) == ""
+    
 class Group:
-  def __init__(self, e):
-    self.batches = []
-    self.kind = 'group'
-    self.id = attr(e, "id")
-    self.no = int(attr(e, "no"))
-    self.priority = 2
-    self.time = time.time()
-    self.requester = ""
-    self.requester_email = ""
-    self.flags = string.split(attr(e, "flags", ""))
-    for c in e.childNodes:
-      if is_blank(c): continue
-      if c.nodeType != Element.ELEMENT_NODE:
-        log.panic("xml: evil group child %d" % c.nodeType)
-      if c.nodeName == "batch":
-        self.batches.append(Batch(c))
-      elif c.nodeName == "requester":
-        self.requester = text(c)
-        self.requester_email = attr(c, "email", "")
-      elif c.nodeName == "priority":
-        self.priority = int(text(c))
-      elif c.nodeName == "time":
-        self.time = int(text(c))
-      else:
-        log.panic("xml: evil group child (%s)" % c.nodeName)
-    # note that we also check that group is sorted WRT deps
-    m = {}
-    for b in self.batches:
-      deps = []
-      m[b.b_id] = b
-      for dep in b.depends_on:
-        if m.has_key(dep):
-          # avoid self-deps
-          if id(m[dep]) != id(b):
-            deps.append(m[dep])
-        else:
-          log.panic("xml: dependency not found in group")
-      b.depends_on = deps
-    if self.requester_email == "" and self.requester != "":
-      self.requester_email = acl.user(self.requester).mail_to()
+    def __init__(self, e):
+        self.batches = []
+        self.kind = 'group'
+        self.id = attr(e, "id")
+        self.no = int(attr(e, "no"))
+        self.priority = 2
+        self.time = time.time()
+        self.requester = ""
+        self.requester_email = ""
+        self.flags = string.split(attr(e, "flags", ""))
+        for c in e.childNodes:
+            if is_blank(c): continue
+            if c.nodeType != Element.ELEMENT_NODE:
+                log.panic("xml: evil group child %d" % c.nodeType)
+            if c.nodeName == "batch":
+                self.batches.append(Batch(c))
+            elif c.nodeName == "requester":
+                self.requester = text(c)
+                self.requester_email = attr(c, "email", "")
+            elif c.nodeName == "priority":
+                self.priority = int(text(c))
+            elif c.nodeName == "time":
+                self.time = int(text(c))
+            else:
+                log.panic("xml: evil group child (%s)" % c.nodeName)
+        # note that we also check that group is sorted WRT deps
+        m = {}
+        for b in self.batches:
+            deps = []
+            m[b.b_id] = b
+            for dep in b.depends_on:
+                if m.has_key(dep):
+                    # avoid self-deps
+                    if id(m[dep]) != id(b):
+                        deps.append(m[dep])
+                else:
+                    log.panic("xml: dependency not found in group")
+            b.depends_on = deps
+        if self.requester_email == "" and self.requester != "":
+            self.requester_email = acl.user(self.requester).mail_to()
 
-  def dump(self, f):
-    f.write("group: %d (id=%s pri=%d)\n" % (self.no, self.id, self.priority))
-    f.write("  from: %s\n" % self.requester)
-    f.write("  flags: %s\n" % string.join(self.flags))
-    f.write("  time: %s\n" % time.asctime(time.localtime(self.time)))
-    for b in self.batches:
-      b.dump(f)
-    f.write("\n")
+    def dump(self, f):
+        f.write("group: %d (id=%s pri=%d)\n" % (self.no, self.id, self.priority))
+        f.write("  from: %s\n" % self.requester)
+        f.write("  flags: %s\n" % string.join(self.flags))
+        f.write("  time: %s\n" % time.asctime(time.localtime(self.time)))
+        for b in self.batches:
+            b.dump(f)
+        f.write("\n")
 
-  def dump_html(self, f):
-    f.write("<p><b>%d</b>. %s from %s <small>%s, %d, %s</small><br/>\n" % \
+    def dump_html(self, f):
+        f.write("<p><b>%d</b>. %s from %s <small>%s, %d, %s</small><br/>\n" % \
                 (self.no,
                  escape(time.strftime("%Y.%m.%d %H:%M:%S", time.localtime(self.time))),
                  escape(self.requester),
                  self.id, self.priority, string.join(self.flags)))
-    f.write("<ul>\n")
-    for b in self.batches:
-      b.dump_html(f)
-    f.write("</ul>\n")
-    f.write("</p>\n")
+        f.write("<ul>\n")
+        for b in self.batches:
+            b.dump_html(f)
+        f.write("</ul>\n")
+        f.write("</p>\n")
 
-  def write_to(self, f):
-    f.write("""
+    def write_to(self, f):
+        f.write("""
        <group id="%s" no="%d" flags="%s">
          <requester email='%s'>%s</requester>
          <time>%d</time>
          <priority>%d</priority>\n""" % (self.id, self.no, string.join(self.flags),
-                escape(self.requester_email), escape(self.requester), 
-                self.time, self.priority))
-    for b in self.batches:
-      b.write_to(f)
-    f.write("       </group>\n\n")
+                    escape(self.requester_email), escape(self.requester), 
+                    self.time, self.priority))
+        for b in self.batches:
+            b.write_to(f)
+        f.write("       </group>\n\n")
 
-  def is_done(self):
-    ok = 1
-    for b in self.batches:
-      if not b.is_done():
-        ok = 0
-    return ok
+    def is_done(self):
+        ok = 1
+        for b in self.batches:
+            if not b.is_done():
+                ok = 0
+        return ok
 
 class Batch:
-  def __init__(self, e):
-    self.bconds_with = []
-    self.bconds_without = []
-    self.builders = []
-    self.builders_status = {}
-    self.branch = ""
-    self.src_rpm = ""
-    self.info = ""
-    self.spec = ""
-    self.command = ""
-    self.command_flags = []
-    self.gb_id = ""
-    self.b_id = attr(e, "id")
-    self.depends_on = string.split(attr(e, "depends-on"))
-    for c in e.childNodes:
-      if is_blank(c): continue
-      if c.nodeType != Element.ELEMENT_NODE:
-        log.panic("xml: evil batch child %d" % c.nodeType)
-      if c.nodeName == "src-rpm":
-        self.src_rpm = text(c)
-      elif c.nodeName == "spec":
-        self.spec = text(c)
-      elif c.nodeName == "command":
-        self.spec = "COMMAND"
-        self.command = text(c)
-        self.command_flags = string.split(attr(c, "flags", ""))
-      elif c.nodeName == "info":
-        self.info = text(c)
-      elif c.nodeName == "branch":
-        self.branch = text(c)
-      elif c.nodeName == "builder":
-        self.builders.append(text(c))
-        self.builders_status[text(c)] = attr(c, "status", "?")
-      elif c.nodeName == "with":
-        self.bconds_with.append(text(c))
-      elif c.nodeName == "without":
-        self.bconds_without.append(text(c))
-      else:
-        log.panic("xml: evil batch child (%s)" % c.nodeName)
+    def __init__(self, e):
+        self.bconds_with = []
+        self.bconds_without = []
+        self.builders = []
+        self.builders_status = {}
+        self.branch = ""
+        self.src_rpm = ""
+        self.info = ""
+        self.spec = ""
+        self.command = ""
+        self.command_flags = []
+        self.gb_id = ""
+        self.b_id = attr(e, "id")
+        self.depends_on = string.split(attr(e, "depends-on"))
+        for c in e.childNodes:
+            if is_blank(c): continue
+            if c.nodeType != Element.ELEMENT_NODE:
+                log.panic("xml: evil batch child %d" % c.nodeType)
+            if c.nodeName == "src-rpm":
+                self.src_rpm = text(c)
+            elif c.nodeName == "spec":
+                self.spec = text(c)
+            elif c.nodeName == "command":
+                self.spec = "COMMAND"
+                self.command = text(c)
+                self.command_flags = string.split(attr(c, "flags", ""))
+            elif c.nodeName == "info":
+                self.info = text(c)
+            elif c.nodeName == "branch":
+                self.branch = text(c)
+            elif c.nodeName == "builder":
+                self.builders.append(text(c))
+                self.builders_status[text(c)] = attr(c, "status", "?")
+            elif c.nodeName == "with":
+                self.bconds_with.append(text(c))
+            elif c.nodeName == "without":
+                self.bconds_without.append(text(c))
+            else:
+                log.panic("xml: evil batch child (%s)" % c.nodeName)
  
-  def is_done(self):
-    ok = 1
-    for b in self.builders:
-      s = self.builders_status[b]
-      if not (s == "OK" or s == "FAIL" or s == "SKIP"):
-        ok = 0
-    return ok
-      
-  def dump(self, f):
-    f.write("  batch: %s/%s\n" % (self.src_rpm, self.spec))
-    f.write("    info: %s\n" % self.info)
-    f.write("    branch: %s\n" % self.branch)
-    f.write("    bconds: %s\n" % self.bconds_string())
-    builders = []
-    for b in self.builders:
-      builders.append("%s:%s" % (b, self.builders_status[b]))
-    f.write("    builders: %s\n" % string.join(builders))
+    def is_done(self):
+        ok = 1
+        for b in self.builders:
+            s = self.builders_status[b]
+            if not (s == "OK" or s == "FAIL" or s == "SKIP"):
+                ok = 0
+        return ok
+            
+    def dump(self, f):
+        f.write("  batch: %s/%s\n" % (self.src_rpm, self.spec))
+        f.write("    info: %s\n" % self.info)
+        f.write("    branch: %s\n" % self.branch)
+        f.write("    bconds: %s\n" % self.bconds_string())
+        builders = []
+        for b in self.builders:
+            builders.append("%s:%s" % (b, self.builders_status[b]))
+        f.write("    builders: %s\n" % string.join(builders))
 
-  def is_command(self):
-    return self.command != ""
+    def is_command(self):
+        return self.command != ""
 
-  def dump_html(self, f):
-    f.write("<li>\n")
-    if self.is_command():
-      desc = "SH: %s [%s]" % (self.command, ' '.join(self.command_flags))
-    else:
-      desc = "%s (%s -R %s %s)" % \
-        (self.src_rpm, self.spec, self.branch, self.bconds_string())
-    f.write("%s <small>[" % desc)
-    builders = []
-    bl_archs = {
-      "th-SRPMS":0, "th-i486":1, "th-i686":2, "th-athlon":3, "th-ia64":4,
-      "th-alpha":5, "th-sparc":6, "th-ppc":7,
-      "ac-SRPMS":8, "ac-i386":9, "ac-i586":10, "ac-i686":11, "ac-athlon":12,
-      "ac-amd64":13, "ac-alpha":14, "ac-sparc":15, "ac-ppc":16,
-      "ra-i386":17, "ra-i586":18, "ra-i686":19, "ra-alpha":20,
-      "ra-sparc":21, "ra-ppc":22, "nest-i486":23, "nest-i686":24,
-      "nest-alpha":25, "nest-ppc":26
-    }
-    for b in self.builders:
-      s = self.builders_status[b]
-      if s == "OK":
-        c = "green"
-      elif s == "FAIL":
-        c = "red"
-      elif s == "SKIP":
-        c = "blue"
-      else:
-        c = "black"
-      link_pre = ""
-      link_post = ""
-      if bl_archs.has_key(b) and (s == "OK" or s == "FAIL") and len(self.spec) > 5:
+    def dump_html(self, f):
+        f.write("<li>\n")
         if self.is_command():
-         bl_name = "command"
-       else:
-         bl_name = self.spec[:len(self.spec)-5]
-        path = "/%s/%s/%s.bz2" % (b.replace('-','/'), s, bl_name)
-        is_ok = 0
-        if s == "OK": is_ok = 1
-        link_pre = "<a href=\"http://buildlogs.pld-linux.org/index.php?idx=%d&ok=%d&id=%s\">" \
-                % (bl_archs[b], is_ok, binascii.b2a_hex(md5.new(path).digest()))
-        link_post = "</a>"
-      builders.append(link_pre + ("<font color='%s'><b>%s:%s</b></font>" % (c, b, s)) + link_post)
-    f.write("%s]</small></li>\n" % string.join(builders))
+            desc = "SH: %s [%s]" % (self.command, ' '.join(self.command_flags))
+        else:
+            desc = "%s (%s -R %s %s)" % \
+                (self.src_rpm, self.spec, self.branch, self.bconds_string())
+        f.write("%s <small>[" % desc)
+        builders = []
+        bl_archs = {
+            "th-SRPMS":0, "th-i486":1, "th-i686":2, "th-athlon":3, "th-ia64":4,
+            "th-alpha":5, "th-sparc":6, "th-ppc":7,
+            "ac-SRPMS":8, "ac-i386":9, "ac-i586":10, "ac-i686":11, "ac-athlon":12,
+            "ac-amd64":13, "ac-alpha":14, "ac-sparc":15, "ac-ppc":16,
+            "ra-i386":17, "ra-i586":18, "ra-i686":19, "ra-alpha":20,
+            "ra-sparc":21, "ra-ppc":22, "nest-i486":23, "nest-i686":24,
+            "nest-alpha":25, "nest-ppc":26
+        }
+        for b in self.builders:
+            s = self.builders_status[b]
+            if s == "OK":
+                c = "green"
+            elif s == "FAIL":
+                c = "red"
+            elif s == "SKIP":
+                c = "blue"
+            else:
+                c = "black"
+            link_pre = ""
+            link_post = ""
+            if bl_archs.has_key(b) and (s == "OK" or s == "FAIL") and len(self.spec) > 5:
+                if self.is_command():
+                    bl_name = "command"
+                else:
+                    bl_name = self.spec[:len(self.spec)-5]
+                path = "/%s/%s/%s.bz2" % (b.replace('-','/'), s, bl_name)
+                is_ok = 0
+                if s == "OK": is_ok = 1
+                link_pre = "<a href=\"http://buildlogs.pld-linux.org/index.php?idx=%d&ok=%d&id=%s\">" \
+                        % (bl_archs[b], is_ok, binascii.b2a_hex(md5.new(path).digest()))
+                link_post = "</a>"
+            builders.append(link_pre + ("<font color='%s'><b>%s:%s</b></font>" %
+                                        (c, b, s)) + link_post)
+        f.write("%s]</small></li>\n" % string.join(builders))
 
-  def bconds_string(self):
-    r = ""
-    for b in self.bconds_with:
-      r = r + " --with " + b
-    for b in self.bconds_without:
-      r = r + " --without " + b
-    return r
-    
-  def write_to(self, f):
-    f.write("""
+    def bconds_string(self):
+        r = ""
+        for b in self.bconds_with:
+            r = r + " --with " + b
+        for b in self.bconds_without:
+            r = r + " --without " + b
+        return r
+        
+    def write_to(self, f):
+        f.write("""
          <batch id='%s' depends-on='%s'>
            <src-rpm>%s</src-rpm>
            <command flags="%s">%s</command>
            <spec>%s</spec>
            <branch>%s</branch>
            <info>%s</info>\n""" % (self.b_id, 
-             string.join(map(lambda (b): b.b_id, self.depends_on)),
-             escape(self.src_rpm), 
-             escape(' '.join(self.command_flags)), escape(self.command),
-             escape(self.spec), escape(self.branch), escape(self.info)))
-    for b in self.bconds_with:
-      f.write("           <with>%s</with>\n" % escape(b))
-    for b in self.bconds_without:
-      f.write("           <without>%s</without>\n" % escape(b))
-    for b in self.builders:
-      f.write("           <builder status='%s'>%s</builder>\n" % \
-                        (escape(self.builders_status[b]), escape(b)))
-    f.write("         </batch>\n")
-    
-  def log_line(self, l):
-    log.notice(l)
-    if self.logfile != None:
-      util.append_to(self.logfile, l)
+                 string.join(map(lambda (b): b.b_id, self.depends_on)),
+                 escape(self.src_rpm), 
+                 escape(' '.join(self.command_flags)), escape(self.command),
+                 escape(self.spec), escape(self.branch), escape(self.info)))
+        for b in self.bconds_with:
+            f.write("           <with>%s</with>\n" % escape(b))
+        for b in self.bconds_without:
+            f.write("           <without>%s</without>\n" % escape(b))
+        for b in self.builders:
+            f.write("           <builder status='%s'>%s</builder>\n" % \
+                                                (escape(self.builders_status[b]), escape(b)))
+        f.write("         </batch>\n")
+        
+    def log_line(self, l):
+        log.notice(l)
+        if self.logfile != None:
+            util.append_to(self.logfile, l)
 
-  def expand_builders(batch, all_builders):
-    all = []
-    for bld in batch.builders:
-      res = []
-      for my_bld in all_builders:
-        if fnmatch.fnmatch(my_bld, bld):
-          res.append(my_bld)
-      if res != []:
-        all.extend(res)
-      else:
-        all.append(bld)
-    batch.builders = all
+    def expand_builders(batch, all_builders):
+        all = []
+        for bld in batch.builders:
+            res = []
+            for my_bld in all_builders:
+                if fnmatch.fnmatch(my_bld, bld):
+                    res.append(my_bld)
+            if res != []:
+                all.extend(res)
+            else:
+                all.append(bld)
+        batch.builders = all
 
class Notification:
    """A builder's report of per-batch build results, parsed from XML.

    Holds a mapping of batch id -> final status ("OK", "FAIL" or "SKIP")
    for one builder, and can stamp those statuses onto a queue.
    """

    def __init__(self, e):
        self.kind = 'notification'
        self.group_id = attr(e, "group-id")
        self.builder = attr(e, "builder")
        # batch id -> status (the redundant "self.batches = []" that was
        # immediately overwritten has been dropped)
        self.batches = {}
        for c in e.childNodes:
            if is_blank(c): continue
            if c.nodeType != Element.ELEMENT_NODE:
                log.panic("xml: evil notification child %d" % c.nodeType)
            if c.nodeName == "batch":
                # renamed from "id", which shadowed the builtin
                b_id = attr(c, "id")
                status = attr(c, "status")
                if status not in ("OK", "FAIL", "SKIP"):
                    # bug fix: was "self.status", which does not exist and
                    # raised NameError instead of reporting the bad status
                    log.panic("xml notification: bad status: %s" % status)
                self.batches[b_id] = status
            else:
                log.panic("xml: evil notification child (%s)" % c.nodeName)

    def apply_to(self, q):
        """Record this builder's reported statuses on the matching
        batches of every group request in queue q."""
        for r in q.requests:
            if r.kind == "group":
                for b in r.batches:
                    # "in" instead of the deprecated has_key()
                    if b.b_id in self.batches:
                        b.builders_status[self.builder] = self.batches[b.b_id]
 
def build_request(e):
    """Turn a parsed XML element into the matching request object."""
    if e.nodeType != Element.ELEMENT_NODE:
        log.panic("xml: evil request element")
    # dispatch on the element name
    constructors = {
        "group": Group,
        "notification": Notification,
        # FIXME
        "command": Command,
    }
    if e.nodeName in constructors:
        return constructors[e.nodeName](e)
    log.panic("xml: evil request <%s>" % e.nodeName)
 
def parse_request(f):
    """Parse a single request document from file object f."""
    doc = parse(f)
    return build_request(doc.documentElement)
def parse_requests(f):
    """Parse a document whose root element holds any number of request
    elements; return the list of parsed requests."""
    doc = parse(f)
    return [build_request(node)
            for node in doc.documentElement.childNodes
            if not is_blank(node)]
index 6bf5c2628fe311a0d0a3f5c841b97e3734a0a72b..cd39090fbb9ff95dac9e3693c9d04d22b36e0640 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import string
 import signal
 import os
@@ -21,100 +23,100 @@ from config import config, init_conf
 last_count = 0
 
def alarmalarm(signum, frame):
    """SIGALRM handler: abort a hung HTTP transfer by raising IOError."""
    # modernized from the legacy "raise IOError, '...'" statement form;
    # behavior is identical in Python 2 and the syntax is also valid in
    # Python 3
    raise IOError('TCP connection hung')
 
def has_new(control_url):
    """Return 1 when the src builder advertises a request number newer
    than the one recorded in path.last_req_no_file, else 0.

    Side effect: updates the module-global last_count.  Exits the
    process when the control URL cannot be fetched within 240s.
    """
    global last_count
    cnt_f = open(path.last_req_no_file)
    try:
        last_count = int(string.strip(cnt_f.readline()))
    finally:
        # close even if the file is empty/garbled and int() raises
        cnt_f.close()
    f = None
    signal.signal(signal.SIGALRM, alarmalarm)
    try:
        signal.alarm(240)
        f = urllib.urlopen(control_url + "/max_req_no")
        signal.alarm(0)
    except Exception:
        # narrowed from a bare "except:", which also swallowed
        # SystemExit and KeyboardInterrupt; any fetch failure (including
        # the IOError raised by alarmalarm) is fatal here
        signal.alarm(0)
        log.error("can't fetch %s" % (control_url + "/max_req_no"))
        sys.exit(1)
    res = 0
    if int(string.strip(f.readline())) != last_count:
        res = 1
    f.close()
    return res
 
def fetch_queue(control_url):
    """Download queue.gz from the src builder, verify its GPG signature
    and the signer's ACL rights, and return the parsed request list.

    Exits the process on fetch failure, bad signature or missing
    sign_queue permission.
    """
    signal.signal(signal.SIGALRM, alarmalarm)
    try:
        signal.alarm(240)
        f = urllib.urlopen(control_url + "/queue.gz")
        signal.alarm(0)
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # still propagate
        signal.alarm(0)
        log.error("can't fetch %s" % (control_url + "/queue.gz"))
        sys.exit(1)
    # buffer the whole download, then decompress and verify in memory
    sio = StringIO.StringIO()
    util.sendfile(f, sio)
    f.close()
    sio.seek(0)
    f = gzip.GzipFile(fileobj = sio)
    (signers, body) = gpg.verify_sig(f)
    u = acl.user_by_email(signers)
    if u is None:
        log.alert("queue.gz not signed with signature of valid user: %s" % signers)
        sys.exit(1)
    if not u.can_do("sign_queue", "all"):
        log.alert("user %s is not allowed to sign my queue" % u.login)
        sys.exit(1)
    body.seek(0)
    return request.parse_requests(body)
 
def handle_reqs(builder, reqs):
    """Append the group requests relevant to one binary builder onto
    that builder's private queue file (created on first use)."""
    qpath = path.queue_file + "-" + builder
    if not os.access(qpath, os.F_OK):
        util.append_to(qpath, "<queue/>\n")
    q = B_Queue(qpath)
    q.lock(0)
    q.read()
    for r in reqs:
        if r.kind != 'group':
            # bug fix: was a string exception ("raise 'msg'"), which
            # modern Python rejects with a TypeError
            raise Exception('handle_reqs: fatal: huh? %s' % r.kind)
        # the request is relevant if any of its batches targets us
        need_it = 0
        for b in r.batches:
            if builder in b.builders:
                need_it = 1
        if need_it:
            log.notice("queued %s (%d) for %s" % (r.id, r.no, builder))
            q.add(r)
    q.write()
    q.unlock()
 
def main():
    """Single fetcher pass: pull any new requests from the src builder
    and fan them out to the per-builder queue files."""
    lck = lock.lock("request_fetcher", non_block = True)
    if lck is None:
        sys.exit(1)
    init_conf("")

    status.push("fetching requests")
    if has_new(config.control_url):
        q = fetch_queue(config.control_url)
        max_no = 0
        q_new = []
        for r in q:
            # track the highest request number seen and collect only
            # the requests newer than the last processed one
            max_no = max(max_no, r.no)
            if r.no > last_count:
                q_new.append(r)
        for b in config.binary_builders:
            handle_reqs(b, q_new)
        f = open(path.last_req_no_file, "w")
        f.write("%d\n" % max_no)
        f.close()
    status.pop()
    lck.close()

if __name__ == '__main__':
    # an http connection is established (and a few bytes transferred
    # through it) every $secs seconds
    loop.run_loop(main, secs = 10)
index ddb9eaf8327b23ee810373f2860e3e8bfd86917d..26c86f8d45c604058dab62c24db68d22851dc529 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import email
 import string
 import time
@@ -19,147 +21,147 @@ from bqueue import B_Queue
 from config import config, init_conf
 
def check_double_id(id):
    """Return 1 (and alert) when this request id was already processed;
    otherwise record it in path.processed_ids_file and return 0."""
    id_nl = id + "\n"

    ids = open(path.processed_ids_file)
    try:
        # plain file iteration instead of the deprecated xreadlines()
        for i in ids:
            if i == id_nl:
                # FIXME: security email here?
                log.alert("request %s already processed" % id)
                return 1
    finally:
        # bug fix: the original leaked the handle when returning from
        # inside the loop
        ids.close()

    ids = open(path.processed_ids_file, "a")
    ids.write(id_nl)
    ids.close()

    return 0
 
def handle_group(r, user):
    """Validate a group request against the ACL and, when every check
    passes, stamp it and append it to the src builder queue."""

    def fail_mail(msg):
        # log the failure and mail it back to the requester
        if len(r.batches) >= 1:
            spec = r.batches[0].spec
        else:
            spec = "None.spec"
        log.error("%s: %s" % (spec, msg))
        m = user.message_to()
        m.set_headers(subject = "building %s failed" % spec)
        m.write_line(msg)
        m.send()

    lock("request")
    if check_double_id(r.id):
        return

    for batch in r.batches:
        # the requester must hold src rights for this branch
        if not user.can_do("src", config.builder, batch.branch):
            fail_mail("user %s is not allowed to src:%s:%s" \
                        % (user.get_login(), config.builder, batch.branch))
            return

        if "upgrade" in r.flags and not user.can_do("upgrade", config.builder, batch.branch):
            fail_mail("user %s is not allowed to upgrade:%s:%s" \
                        % (user.get_login(), config.builder, batch.branch))
            return

        # expand builder name patterns; the src builder itself is never a
        # binary target for non-command batches
        batch.expand_builders(config.binary_builders)
        if not batch.is_command() and config.builder in batch.builders:
            batch.builders.remove(config.builder)
        for bld in batch.builders:
            batch.builders_status[bld] = '?'
            if bld not in config.binary_builders and bld != config.builder:
                fail_mail("I (src rpm builder '%s') do not handle binary builder '%s', only '%s'" % \
                        (config.builder, bld, string.join(config.binary_builders)))
                return
            if batch.is_command():
                if not user.can_do("command", bld):
                    fail_mail("user %s is not allowed to command:%s" \
                                % (user.get_login(), bld))
                    return
            elif not user.can_do("binary", bld, batch.branch):
                # fall back to a per-package "binary-<pkg>" permission
                pkg = batch.spec
                if pkg.endswith(".spec"):
                    pkg = pkg[:-5]
                if not user.can_do("binary-" + pkg, bld, batch.branch):
                    fail_mail("user %s is not allowed to binary-%s:%s:%s" \
                                % (user.get_login(), pkg, bld, batch.branch))
                    return

    # all checks passed: stamp the request and queue it
    r.priority = user.check_priority(r.priority, config.builder)
    r.requester = user.get_login()
    r.requester_email = user.mail_to()
    r.time = time.time()
    log.notice("queued %s from %s" % (r.id, user.get_login()))
    q = B_Queue(path.queue_file)
    q.lock(0)
    q.read()
    q.add(r)
    q.write()
    q.unlock()
 
 def handle_notification(r, user):
-  if not user.can_do("notify", r.builder):
-    log.alert("user %s is not allowed to notify:%s" % (user.login, r.builder))
-  q = B_Queue(path.req_queue_file)
-  q.lock(0)
-  q.read()
-  not_fin = filter(lambda (r): not r.is_done(), q.requests)
-  r.apply_to(q)
-  for r in not_fin:
-    if r.is_done():
-      util.clean_tmp(path.srpms_dir + r.id)
-  now = time.time()
-  def leave_it(r):
-    # for ,,done'' set timeout to 4d
-    if r.is_done() and r.time + 4 * 24 * 60 * 60 < now:
-      return False
-    # and for not ,,done'' set it to 20d
-    if r.time + 20 * 24 * 60 * 60 < now:
-      util.clean_tmp(path.srpms_dir + r.id)
-      return False
-    return True
-  q.requests = filter(leave_it, q.requests)
-  q.write()
-  q.dump(open(path.queue_stats_file, "w"))
-  q.dump_html(open(path.queue_html_stats_file, "w"))
-  os.chmod(path.queue_html_stats_file, 0644)
-  os.chmod(path.queue_stats_file, 0644)
-  q.unlock()
+    if not user.can_do("notify", r.builder):
+        log.alert("user %s is not allowed to notify:%s" % (user.login, r.builder))
+    q = B_Queue(path.req_queue_file)
+    q.lock(0)
+    q.read()
+    not_fin = filter(lambda (r): not r.is_done(), q.requests)
+    r.apply_to(q)
+    for r in not_fin:
+        if r.is_done():
+            util.clean_tmp(path.srpms_dir + r.id)
+    now = time.time()
+    def leave_it(r):
+        # for ,,done'' set timeout to 4d
+        if r.is_done() and r.time + 4 * 24 * 60 * 60 < now:
+            return False
+        # and for not ,,done'' set it to 20d
+        if r.time + 20 * 24 * 60 * 60 < now:
+            util.clean_tmp(path.srpms_dir + r.id)
+            return False
+        return True
+    q.requests = filter(leave_it, q.requests)
+    q.write()
+    q.dump(open(path.queue_stats_file, "w"))
+    q.dump_html(open(path.queue_html_stats_file, "w"))
+    os.chmod(path.queue_html_stats_file, 0644)
+    os.chmod(path.queue_stats_file, 0644)
+    q.unlock()
 
def handle_request(f):
    """Read one GPG-signed email request from f, authenticate the sender
    against the ACL and dispatch to the matching handler."""
    sio = StringIO.StringIO()
    util.sendfile(f, sio)
    sio.seek(0)
    (em, body) = gpg.verify_sig(sio)
    user = acl.user_by_email(em)
    if user is None:
        # FIXME: security email here
        log.alert("invalid signature, or not in acl %s" % em)
        return
    acl.set_current_user(user)
    status.push("email from %s" % user.login)
    r = request.parse_request(body)
    if r.kind == 'group':
        handle_group(r, user)
    elif r.kind == 'notification':
        handle_notification(r, user)
    else:
        msg = "%s: don't know how to handle requests of this kind '%s'" \
                        % (user.get_login(), r.kind)
        log.alert(msg)
        m = user.message_to()
        m.set_headers(subject = "unknown request")
        m.write_line(msg)
        m.send()
    status.pop()
 
def main():
    """Entry point: process a single emailed request read from stdin."""
    init_conf("src")
    status.push("handling email request")
    handle_request(sys.stdin)
    status.pop()
    sys.exit(0)

if __name__ == '__main__':
    wrap.wrap(main)
index 971d1a070082127a3937b0cca8e3657f9f2bdf0a..0f4c8f0c042f12c58270c82394efcbc560b93b00 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import sys
 import os
 import atexit
@@ -28,9 +30,9 @@ import socket
 socket.myorigsocket=socket.socket
 
def mysocket(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
    """Drop-in replacement for socket.socket that switches SO_KEEPALIVE
    on, so hung peers are eventually detected."""
    sock = socket.myorigsocket(family, type, proto)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    return sock
 
 socket.socket=mysocket
 # *HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*
@@ -38,195 +40,196 @@ socket.socket=mysocket
 # this code is duplicated in srpm_builder, but we
 # might want to handle some cases differently here
 def pick_request(q):
-  def mycmp(r1, r2):
-    if r1.kind != 'group' or r2.kind != 'group':
-      raise "non-group requests"
-    pri_diff = cmp(r1.priority, r2.priority)
-    if pri_diff == 0:
-      return cmp(r1.time, r2.time)
-    else:
-      return pri_diff
-  q.requests.sort(mycmp)
-  ret = q.requests[0]
-  return ret
+    def mycmp(r1, r2):
+        if r1.kind != 'group' or r2.kind != 'group':
+            raise "non-group requests"
+        pri_diff = cmp(r1.priority, r2.priority)
+        if pri_diff == 0:
+            return cmp(r1.time, r2.time)
+        else:
+            return pri_diff
+    q.requests.sort(mycmp)
+    ret = q.requests[0]
+    return ret
 
 def fetch_src(r, b):
-  src_url = config.control_url + "/srpms/" + r.id + "/" + b.src_rpm
-  b.log_line("fetching %s" % src_url)
-  start = time.time()
-  good=False
-  while not good:
-    try:
-      good=True
-      f = urllib.urlopen(src_url)
-    except IOError, error:
-      if error[1][0] == 60 or error[1][0] == 110 or error[1][0] == -3 or error[1][0] == 111 or error[1][0] == 61:
-        good=False
-        b.log_line("unable to connect... trying again")
-      else:
-        f = urllib.urlopen(src_url) # So we get the exception logged :)
-
-  o = chroot.popen("cat > %s" % b.src_rpm, mode = "w")
-  bytes = util.sendfile(f, o)
-  f.close()
-  o.close()
-  t = time.time() - start
-  if t == 0:
-    b.log_line("fetched %d bytes" % bytes)
-  else:
-    b.log_line("fetched %d bytes, %.1f K/s" % (bytes, bytes / 1024.0 / t))
+    src_url = config.control_url + "/srpms/" + r.id + "/" + b.src_rpm
+    b.log_line("fetching %s" % src_url)
+    start = time.time()
+    good=False
+    while not good:
+        try:
+            good=True
+            f = urllib.urlopen(src_url)
+        except IOError, error:
+            if error[1][0] == 60 or error[1][0] == 110 or error[1][0] == -3 or error[1][0] == 111 or error[1][0] == 61:
+                good=False
+                b.log_line("unable to connect... trying again")
+            else:
+                f = urllib.urlopen(src_url) # So we get the exception logged :)
+
+    o = chroot.popen("cat > %s" % b.src_rpm, mode = "w")
+    bytes = util.sendfile(f, o)
+    f.close()
+    o.close()
+    t = time.time() - start
+    if t == 0:
+        b.log_line("fetched %d bytes" % bytes)
+    else:
+        b.log_line("fetched %d bytes, %.1f K/s" % (bytes, bytes / 1024.0 / t))
 
 def build_rpm(r, b):
-  status.push("building %s" % b.spec)
-  b.log_line("request from: %s" % r.requester)
-  b.log_line("started at: %s" % time.asctime())
-  fetch_src(r, b)
-  b.log_line("installing srpm: %s" % b.src_rpm)
-  res = chroot.run("rpm -U %s" % b.src_rpm, logfile = b.logfile)
-  chroot.run("rm -f %s" % b.src_rpm, logfile = b.logfile)
-  b.files = []
-  tmpdir = "/tmp/B." + b.b_id[0:6]
-  if res:
-    b.log_line("error: installing src rpm failed")
-    res = 1
-  else:
-    chroot.run("install -m 700 -d %s" % tmpdir)
-    rpmbuild_opt = "%s --target %s-pld-linux" % (b.bconds_string(), config.arch)
-    cmd = "cd rpm/SPECS; TMPDIR=%s nice -n %s rpmbuild -bb %s %s" % \
-          (tmpdir, config.nice, rpmbuild_opt, b.spec)
-    if ("no-install-br" not in r.flags) and install_br.install_br(r, b):
-      res = 1
+    status.push("building %s" % b.spec)
+    b.log_line("request from: %s" % r.requester)
+    b.log_line("started at: %s" % time.asctime())
+    fetch_src(r, b)
+    b.log_line("installing srpm: %s" % b.src_rpm)
+    res = chroot.run("rpm -U %s" % b.src_rpm, logfile = b.logfile)
+    chroot.run("rm -f %s" % b.src_rpm, logfile = b.logfile)
+    b.files = []
+    tmpdir = "/tmp/B." + b.b_id[0:6]
+    if res:
+        b.log_line("error: installing src rpm failed")
+        res = 1
     else:
-      b.log_line("building RPM using: %s" % cmd)
-      res = chroot.run(cmd, logfile = b.logfile)
-      files = util.collect_files(b.logfile)
-      if len(files) > 0:
-        r.chroot_files.extend(files)
-      else:
-        b.log_line("error: No files produced.")
-        res = 1 # FIXME: is it error?
-      b.files = files
-  chroot.run("rm -rf %s; cd rpm/SPECS; rpmbuild --nodeps --nobuild " \
-             "--clean --rmspec --rmsource %s" % \
-             (tmpdir, b.spec), logfile = b.logfile)
-  chroot.run("rm -rf $HOME/rpm/BUILD/*")
-
-  def ll(l):
-    util.append_to(b.logfile, l)
+        chroot.run("install -m 700 -d %s" % tmpdir)
+        rpmbuild_opt = "%s --target %s-pld-linux" % (b.bconds_string(), config.arch)
+        cmd = "cd rpm/SPECS; TMPDIR=%s nice -n %s rpmbuild -bb %s %s" % \
+                    (tmpdir, config.nice, rpmbuild_opt, b.spec)
+        if ("no-install-br" not in r.flags) and install_br.install_br(r, b):
+            res = 1
+        else:
+            b.log_line("building RPM using: %s" % cmd)
+            res = chroot.run(cmd, logfile = b.logfile)
+            files = util.collect_files(b.logfile)
+            if len(files) > 0:
+                r.chroot_files.extend(files)
+            else:
+                b.log_line("error: No files produced.")
+                res = 1 # FIXME: is it error?
+            b.files = files
+    chroot.run("rm -rf %s; cd rpm/SPECS; rpmbuild --nodeps --nobuild " \
+                         "--clean --rmspec --rmsource %s" % \
+                         (tmpdir, b.spec), logfile = b.logfile)
+    chroot.run("rm -rf $HOME/rpm/BUILD/*")
+
+    def ll(l):
+        util.append_to(b.logfile, l)
  
-  if b.files != []:
-    chroot.run("cp -f %s /spools/ready/; poldek --nodiff --mkidxz -s /spools/ready/" % \
-               string.join(b.files), logfile = b.logfile, user = "root")
-    ll("Begin-PLD-Builder-Info")
-    if "upgrade" in r.flags:
-      upgrade.upgrade_from_batch(r, b)
-    else:
-      ll("not upgrading")
-    ll("End-PLD-Builder-Info")
+    if b.files != []:
+        chroot.run("cp -f %s /spools/ready/; poldek --nodiff --mkidxz " \
+                     "-s /spools/ready/" % \
+                     string.join(b.files), logfile = b.logfile, user = "root")
+        ll("Begin-PLD-Builder-Info")
+        if "upgrade" in r.flags:
+            upgrade.upgrade_from_batch(r, b)
+        else:
+            ll("not upgrading")
+        ll("End-PLD-Builder-Info")
 
-  for f in b.files:
-    local = r.tmp_dir + os.path.basename(f)
-    chroot.run("cat %s; rm -f %s" % (f, f), logfile = local)
-    ftp.add(local)
-
-  def uploadinfo(b):
-    c="file:SRPMS:%s\n" % b.src_rpm
     for f in b.files:
-       c=c + "file:ARCH:%s\n" % os.path.basename(f)
-    c=c + "END\n"
-    return c
-
-  if b.files != []:
-    fname = r.tmp_dir + b.src_rpm + ".uploadinfo"
-    f = open(fname, "w")
-    f.write(uploadinfo(b))
-    f.close()
-    ftp.add(fname, "uploadinfo")
+        local = r.tmp_dir + os.path.basename(f)
+        chroot.run("cat %s; rm -f %s" % (f, f), logfile = local)
+        ftp.add(local)
 
-  status.pop()
+    def uploadinfo(b):
+        c="file:SRPMS:%s\n" % b.src_rpm
+        for f in b.files:
+            c=c + "file:ARCH:%s\n" % os.path.basename(f)
+        c=c + "END\n"
+        return c
 
-  return res
+    if b.files != []:
+        fname = r.tmp_dir + b.src_rpm + ".uploadinfo"
+        f = open(fname, "w")
+        f.write(uploadinfo(b))
+        f.close()
+        ftp.add(fname, "uploadinfo")
+
+    status.pop()
+
+    return res
 
 def handle_request(r):
-  ftp.init(r)
-  buildlogs.init(r)
-  build.build_all(r, build_rpm)
-  report.send_report(r, is_src = False)
-  ftp.flush()
-  notify.send()
+    ftp.init(r)
+    buildlogs.init(r)
+    build.build_all(r, build_rpm)
+    report.send_report(r, is_src = False)
+    ftp.flush()
+    notify.send()
 
 def check_load():
-  do_exit = 0
-  try:
-    f = open("/proc/loadavg")
-    if float(string.split(f.readline())[2]) > config.max_load:
-      do_exit = 1
-  except:
-    pass
-  if do_exit:
-    sys.exit(0)
+    do_exit = 0
+    try:
+        f = open("/proc/loadavg")
+        if float(string.split(f.readline())[2]) > config.max_load:
+            do_exit = 1
+    except:
+        pass
+    if do_exit:
+        sys.exit(0)
 
 def main_for(builder):
-  init_conf(builder)
-  # allow only one build in given builder at once
-  if not lock.lock("building-rpm-for-%s" % config.builder, non_block = 1):
-    return
-  # don't kill server
-  check_load()
-  # not more then job_slots builds at once
-  locked = 0
-  for slot in range(config.job_slots):
-    if lock.lock("building-rpm-slot-%d" % slot, non_block = 1):
-      locked = 1
-      break
-  if not locked:
-    return
-
-  status.push("picking request for %s" % config.builder)
-  q = B_Queue(path.queue_file + "-" + config.builder)
-  q.lock(0)
-  q.read()
-  if q.requests == []:
+    init_conf(builder)
+    # allow only one build in given builder at once
+    if not lock.lock("building-rpm-for-%s" % config.builder, non_block = 1):
+        return
+    # don't kill server
+    check_load()
+    # not more then job_slots builds at once
+    locked = 0
+    for slot in range(config.job_slots):
+        if lock.lock("building-rpm-slot-%d" % slot, non_block = 1):
+            locked = 1
+            break
+    if not locked:
+        return
+
+    status.push("picking request for %s" % config.builder)
+    q = B_Queue(path.queue_file + "-" + config.builder)
+    q.lock(0)
+    q.read()
+    if q.requests == []:
+        q.unlock()
+        return
+    req = pick_request(q)
     q.unlock()
-    return
-  req = pick_request(q)
-  q.unlock()
-  status.pop()
-
-  # record fact that we got lock for this builder, load balancer
-  # will use it for fair-queuing
-  l = lock.lock("got-lock")
-  f = open(path.got_lock_file, "a")
-  f.write(config.builder + "\n")
-  f.close()
-  l.close()
-  
-  msg = "handling request %s (%d) for %s from %s" \
-        % (req.id, req.no, config.builder, req.requester)
-  log.notice(msg)
-  status.push(msg)
-  handle_request(req)
-  status.pop()
-
-  def otherreqs(r):
-    if r.no==req.no:
-      return False
-    else:
-      return True
-
-  q = B_Queue(path.queue_file + "-" + config.builder)
-  q.lock(0)
-  q.read()
-  previouslen=len(q.requests)
-  q.requests=filter(otherreqs, q.requests)
-  if len(q.requests)<previouslen:
-    q.write()
-  q.unlock()
-  
+    status.pop()
+
+    # record fact that we got lock for this builder, load balancer
+    # will use it for fair-queuing
+    l = lock.lock("got-lock")
+    f = open(path.got_lock_file, "a")
+    f.write(config.builder + "\n")
+    f.close()
+    l.close()
+    
+    msg = "handling request %s (%d) for %s from %s" \
+            % (req.id, req.no, config.builder, req.requester)
+    log.notice(msg)
+    status.push(msg)
+    handle_request(req)
+    status.pop()
+
+    def otherreqs(r):
+        if r.no==req.no:
+            return False
+        else:
+            return True
+
+    q = B_Queue(path.queue_file + "-" + config.builder)
+    q.lock(0)
+    q.read()
+    previouslen=len(q.requests)
+    q.requests=filter(otherreqs, q.requests)
+    if len(q.requests)<previouslen:
+        q.write()
+    q.unlock()
+    
 def main():
-  if len(sys.argv) < 2:
-    raise "fatal: need to have builder name as first arg"
-  return main_for(sys.argv[1])
-  
+    if len(sys.argv) < 2:
+        raise "fatal: need to have builder name as first arg"
+    return main_for(sys.argv[1])
+    
 if __name__ == '__main__':
-  loop.run_loop(main)
+    loop.run_loop(main)
index 7c64d35e7d3a85163c99a81ac9369e8047a29cea..4ca59575683176d83a765fbc53800d7b3eed4351 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import email
 import string
 import time
@@ -25,129 +27,129 @@ from bqueue import B_Queue
 from config import config, init_conf
 
 def pick_request(q):
-  def mycmp(r1, r2):
-    if r1.kind != 'group' or r2.kind != 'group':
-      raise "non-group requests"
-    pri_diff = cmp(r1.priority, r2.priority)
-    if pri_diff == 0:
-      return cmp(r1.time, r2.time)
-    else:
-      return pri_diff
-  q.requests.sort(mycmp)
-  ret = q.requests[0]
-  q.requests = q.requests[1:]
-  return ret
-  
+    def mycmp(r1, r2):
+        if r1.kind != 'group' or r2.kind != 'group':
+            raise "non-group requests"
+        pri_diff = cmp(r1.priority, r2.priority)
+        if pri_diff == 0:
+            return cmp(r1.time, r2.time)
+        else:
+            return pri_diff
+    q.requests.sort(mycmp)
+    ret = q.requests[0]
+    q.requests = q.requests[1:]
+    return ret
+    
 def store_binary_request(r):
-  new_b = []
-  for b in r.batches:
-    if not b.build_failed: new_b.append(b)
-  if new_b == []:
-    return
-  r.batches = new_b
-  # store new queue and max_req_no for binary builders
-  cnt_f = open(path.max_req_no_file, "r+")
-  num = int(string.strip(cnt_f.read())) + 1
-  r.no = num
-  q = B_Queue(path.req_queue_file)
-  q.lock(0)
-  q.read()
-  q.add(r)
-  q.write()
-  q.dump(open(path.queue_stats_file, "w"))
-  q.dump_html(open(path.queue_html_stats_file, "w"))
-  os.chmod(path.queue_stats_file, 0644)
-  os.chmod(path.queue_html_stats_file, 0644)
-  q.write_signed(path.req_queue_signed_file)
-  os.chmod(path.req_queue_signed_file, 0644)
-  q.unlock()
-  cnt_f.seek(0)
-  cnt_f.write("%d\n" % num)
-  cnt_f.close()
-  os.chmod(path.max_req_no_file, 0644)
+    new_b = []
+    for b in r.batches:
+        if not b.build_failed: new_b.append(b)
+    if new_b == []:
+        return
+    r.batches = new_b
+    # store new queue and max_req_no for binary builders
+    cnt_f = open(path.max_req_no_file, "r+")
+    num = int(string.strip(cnt_f.read())) + 1
+    r.no = num
+    q = B_Queue(path.req_queue_file)
+    q.lock(0)
+    q.read()
+    q.add(r)
+    q.write()
+    q.dump(open(path.queue_stats_file, "w"))
+    q.dump_html(open(path.queue_html_stats_file, "w"))
+    os.chmod(path.queue_stats_file, 0644)
+    os.chmod(path.queue_html_stats_file, 0644)
+    q.write_signed(path.req_queue_signed_file)
+    os.chmod(path.req_queue_signed_file, 0644)
+    q.unlock()
+    cnt_f.seek(0)
+    cnt_f.write("%d\n" % num)
+    cnt_f.close()
+    os.chmod(path.max_req_no_file, 0644)
 
 def transfer_file(r, b):
-  local = path.srpms_dir + r.id + "/" + b.src_rpm
-  f = b.src_rpm_file
-  # export files from chroot
-  chroot.run("cat %s; rm -f %s" % (f, f), logfile = local)
-  os.chmod(local, 0644)
-  ftp.add(local)
+    local = path.srpms_dir + r.id + "/" + b.src_rpm
+    f = b.src_rpm_file
+    # export files from chroot
+    chroot.run("cat %s; rm -f %s" % (f, f), logfile = local)
+    os.chmod(local, 0644)
+    ftp.add(local)
 
-  fname = path.srpms_dir + r.id + "/" + b.src_rpm + ".uploadinfo"
-  f = open(fname, "w")
-  f.write("info:build:%s:requester:%s\ninfo:build:%s:requester_email:%s\nfile:SRPMS:%s\nEND\n" % (b.gb_id, b.requester, b.gb_id, b.requester_email, b.src_rpm))
-  f.close()
-  ftp.add(fname, "uploadinfo")
+    fname = path.srpms_dir + r.id + "/" + b.src_rpm + ".uploadinfo"
+    f = open(fname, "w")
+    f.write("info:build:%s:requester:%s\ninfo:build:%s:requester_email:%s\nfile:SRPMS:%s\nEND\n" % (b.gb_id, b.requester, b.gb_id, b.requester_email, b.src_rpm))
+    f.close()
+    ftp.add(fname, "uploadinfo")
 
 def build_srpm(r, b):
-  status.push("building %s" % b.spec)
-  b.src_rpm = ""
-  builder_opts = "-nu --nodeps"
-  if b.branch and b.branch.startswith(config.tag_prefixes[0]):
-         tag_test=""
-  else:
-         tag_test=" -Tp %s -tt" % (config.tag_prefixes[0],)
-  cmd = ( "cd rpm/SPECS; nice -n %s ./builder %s -bs %s -r %s %s %s 2>&1" %
-               (config.nice, builder_opts, b.bconds_string(), b.branch, 
-                               tag_test, b.spec) )
-  util.append_to(b.logfile, "request from: %s" % r.requester)
-  util.append_to(b.logfile, "started at: %s" % time.asctime())
-  util.append_to(b.logfile, "building SRPM using: %s\n" % cmd)
-  res = chroot.run(cmd, logfile = b.logfile)
-  util.append_to(b.logfile, "exit status %d" % res)
-  files = util.collect_files(b.logfile)
-  if len(files) > 0:
-    if len(files) > 1:
-      util.append_to(b.logfile, "error: More then one file produced: %s" % files)
-      res = 1
-    last = files[len(files) - 1]
-    b.src_rpm_file = last
-    b.src_rpm = os.path.basename(last)
-    r.chroot_files.extend(files)
-  else:
-    util.append_to(b.logfile, "error: No files produced.")
-    res = 1
-  if res == 0:
-    transfer_file(r, b)
-  if res == 0:
-    for pref in config.tag_prefixes:
-      util.append_to(b.logfile, "tagging: %s" % pref)
-      chroot.run("cd rpm/SPECS; ./builder -r %s -Tp %s -Tv %s" % \
-                  (b.branch, pref, b.spec), logfile = b.logfile)
-  chroot.run("cd rpm/SPECS; rpmbuild --nodeps --nobuild " \
-             "--clean --rmspec --rmsource %s" % \
-             b.spec, logfile = b.logfile)
-  status.pop()
-  return res
+    status.push("building %s" % b.spec)
+    b.src_rpm = ""
+    builder_opts = "-nu --nodeps"
+    if b.branch and b.branch.startswith(config.tag_prefixes[0]):
+                    tag_test=""
+    else:
+                    tag_test=" -Tp %s -tt" % (config.tag_prefixes[0],)
+    cmd = ("cd rpm/SPECS; nice -n %s ./builder %s -bs %s -r %s %s %s 2>&1" %
+             (config.nice, builder_opts, b.bconds_string(), b.branch, 
+              tag_test, b.spec))
+    util.append_to(b.logfile, "request from: %s" % r.requester)
+    util.append_to(b.logfile, "started at: %s" % time.asctime())
+    util.append_to(b.logfile, "building SRPM using: %s\n" % cmd)
+    res = chroot.run(cmd, logfile = b.logfile)
+    util.append_to(b.logfile, "exit status %d" % res)
+    files = util.collect_files(b.logfile)
+    if len(files) > 0:
+        if len(files) > 1:
+            util.append_to(b.logfile, "error: More then one file produced: %s" % files)
+            res = 1
+        last = files[len(files) - 1]
+        b.src_rpm_file = last
+        b.src_rpm = os.path.basename(last)
+        r.chroot_files.extend(files)
+    else:
+        util.append_to(b.logfile, "error: No files produced.")
+        res = 1
+    if res == 0:
+        transfer_file(r, b)
+    if res == 0:
+        for pref in config.tag_prefixes:
+            util.append_to(b.logfile, "tagging: %s" % pref)
+            chroot.run("cd rpm/SPECS; ./builder -r %s -Tp %s -Tv %s" % \
+                        (b.branch, pref, b.spec), logfile = b.logfile)
+    chroot.run("cd rpm/SPECS; rpmbuild --nodeps --nobuild " \
+                         "--clean --rmspec --rmsource %s" % \
+                         b.spec, logfile = b.logfile)
+    status.pop()
+    return res
 
 def handle_request(r):
-  os.mkdir(path.srpms_dir + r.id)
-  os.chmod(path.srpms_dir + r.id, 0755)
-  ftp.init(r)
-  buildlogs.init(r)
-  build.build_all(r, build_srpm)
-  report.send_report(r, is_src = True)
-  report.send_cia_report(r, is_src = True)
-  store_binary_request(r)
-  ftp.flush()
+    os.mkdir(path.srpms_dir + r.id)
+    os.chmod(path.srpms_dir + r.id, 0755)
+    ftp.init(r)
+    buildlogs.init(r)
+    build.build_all(r, build_srpm)
+    report.send_report(r, is_src = True)
+    report.send_cia_report(r, is_src = True)
+    store_binary_request(r)
+    ftp.flush()
 
 def main():
-  init_conf("src")
-  if lock("building-srpm", non_block = 1) == None:
-    return
-  status.push("srpm: processing queue")
-  q = B_Queue(path.queue_file)
-  if not q.lock(1): return
-  q.read()
-  if q.requests == []: return
-  r = pick_request(q)
-  q.write()
-  q.unlock()
-  status.pop()
-  status.push("srpm: handling request from %s" % r.requester)
-  handle_request(r)
-  status.pop()
+    init_conf("src")
+    if lock("building-srpm", non_block = 1) == None:
+        return
+    status.push("srpm: processing queue")
+    q = B_Queue(path.queue_file)
+    if not q.lock(1): return
+    q.read()
+    if q.requests == []: return
+    r = pick_request(q)
+    q.write()
+    q.unlock()
+    status.pop()
+    status.push("srpm: handling request from %s" % r.requester)
+    handle_request(r)
+    status.pop()
 
 if __name__ == '__main__':
-  loop.run_loop(main)
+    loop.run_loop(main)
index cd061a90b45e6387a479b29aa70c4a85a532d82d..8591d18f983aa4461b896c5978b174a5119fbc86 100644 (file)
@@ -1,13 +1,15 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 state = []
 email = ""
 admin = ""
 builder_list = ""
 
 def push(s):
-  state.append(s)
+    state.append(s)
 
 def pop():
-  state.pop()
+    state.pop()
 
 def get():
-  return "%s" % state
+    return "%s" % state
index 163d8a8c0894072a876b1c43e96def721a7301c8..3f0d6cf0f5f44d44700fa0caaaa836f8be76d7d2 100644 (file)
@@ -1,43 +1,45 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import time
 import resource
 
 class Time:
-  def __init__(self):
-    x = resource.getrusage(resource.RUSAGE_CHILDREN)
-    self.user_time = x[0]
-    self.sys_time = x[1]
-    self.non_io_faults = x[6]
-    self.io_faults = x[7]
-    self.time = time.time()
-
-  def sub(self, x):
-    self.user_time -= x.user_time
-    self.sys_time -= x.sys_time
-    self.non_io_faults -= x.non_io_faults
-    self.io_faults -= x.io_faults
-    self.time -= x.time
-
-  def format(self):
-    return "user:%.2fs sys:%.2fs real:%.2fs (faults io:%d non-io:%d)" % \
+    def __init__(self):
+        x = resource.getrusage(resource.RUSAGE_CHILDREN)
+        self.user_time = x[0]
+        self.sys_time = x[1]
+        self.non_io_faults = x[6]
+        self.io_faults = x[7]
+        self.time = time.time()
+
+    def sub(self, x):
+        self.user_time -= x.user_time
+        self.sys_time -= x.sys_time
+        self.non_io_faults -= x.non_io_faults
+        self.io_faults -= x.io_faults
+        self.time -= x.time
+
+    def format(self):
+        return "user:%.2fs sys:%.2fs real:%.2fs (faults io:%d non-io:%d)" % \
                 (self.user_time, self.sys_time, self.time, self.io_faults, 
                  self.non_io_faults)
-    
+        
 class Timer:
-  def __init__(self):
-    self.starts = []
+    def __init__(self):
+        self.starts = []
 
-  def start(self):
-    self.starts.append(Time())
+    def start(self):
+        self.starts.append(Time())
 
-  def stop(self):
-    tmp = Time()
-    tmp.sub(self.starts.pop())
-    return tmp.format()
+    def stop(self):
+        tmp = Time()
+        tmp.sub(self.starts.pop())
+        return tmp.format()
 
 t = Timer()
 
 def start():
-  t.start()
+    t.start()
 
 def stop():
-  return t.stop()
+    return t.stop()
index 150eb50c621cbf6a36db8f20193ffea4fc02dcbe..bfea5de657a43791ba721c1c7c3568cf6d5a93e6 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import re
 import string
 
@@ -6,65 +8,65 @@ import util
 import log
 
 hold = [ 
-  'poldek',
-  'rpm-build'
+    'poldek',
+    'rpm-build'
 ]
 
 def close_killset(killset):
-  k = killset.keys()
-  rx = re.compile(r' marks ([^\s]+)-[^-]+-[^-]+$')
-  errors = ""
-  for p in k:
-    if p in hold:
-      del killset[p]
-      errors += "cannot remove %s because it's crucial\n" % p
-    else:
-      f = chroot.popen("poldek --noask --test --erase %s" % p, user = "root")
-      crucial = 0
-      e = []
-      for l in f.xreadlines():
-        m = rx.search(l)
-        if m:
-          pkg = m.group(1)
-          if pkg in hold:
-            errors += "cannot remove %s because it's required by %s, that is crucial\n" % \
-                        (p, pkg)
-            crucial = 1
-          e.append(pkg)
-      f.close()
-      if crucial:
-        del killset[p]
-      else:
-        for p in e:
-          killset[p] = 2
-  return errors
+    k = killset.keys()
+    rx = re.compile(r' marks ([^\s]+)-[^-]+-[^-]+$')
+    errors = ""
+    for p in k:
+        if p in hold:
+            del killset[p]
+            errors += "cannot remove %s because it's crucial\n" % p
+        else:
+            f = chroot.popen("poldek --noask --test --erase %s" % p, user = "root")
+            crucial = 0
+            e = []
+            for l in f.xreadlines():
+                m = rx.search(l)
+                if m:
+                    pkg = m.group(1)
+                    if pkg in hold:
+                        errors += "cannot remove %s because it's required " \
+                                  "by %s, that is crucial\n" % (p, pkg)
+                        crucial = 1
+                    e.append(pkg)
+            f.close()
+            if crucial:
+                del killset[p]
+            else:
+                for p in e:
+                    killset[p] = 2
+    return errors
 
 def upgrade_from_batch(r, b):
-  f = chroot.popen("rpm --test -F %s 2>&1" % string.join(b.files), user = "root")
-  killset = {}
-  rx = re.compile(r' ([^\s]+)-[^-]+-[^-]+$')
-  for l in f.xreadlines():
-    m = rx.search(l)
-    if m: killset[m.group(1)] = 1
-  f.close()
-  if len(killset) != 0:
-    err = close_killset(killset)
-    if err != "":
-      util.append_to(b.logfile, err)
-      log.notice("cannot upgrade rpms")
-      return
-    k = string.join(killset.keys())
-    if 0:
-      b.log_line("removing %s" % k)
-      res = chroot.run("rpm -e %s" % k, logfile = b.logfile, user = "root")
-      if res != 0:
-        b.log_line("package removal failed")
+    f = chroot.popen("rpm --test -F %s 2>&1" % string.join(b.files), user = "root")
+    killset = {}
+    rx = re.compile(r' ([^\s]+)-[^-]+-[^-]+$')
+    for l in f.xreadlines():
+        m = rx.search(l)
+        if m: killset[m.group(1)] = 1
+    f.close()
+    if len(killset) != 0:
+        err = close_killset(killset)
+        if err != "":
+            util.append_to(b.logfile, err)
+            log.notice("cannot upgrade rpms")
+            return
+        k = string.join(killset.keys())
+        if 0:
+            b.log_line("removing %s" % k)
+            res = chroot.run("rpm -e %s" % k, logfile = b.logfile, user = "root")
+            if res != 0:
+                b.log_line("package removal failed")
+                return
+        else:
+            b.log_line("upgrade would need removal of %s" % k)
+            return
+    b.log_line("upgrading packages")
+    res = chroot.run("rpm -Fvh %s" % string.join(b.files), user = "root")
+    if res != 0:
+        b.log_line("package upgrade failed")
         return
-    else:
-      b.log_line("upgrade would need removal of %s" % k)
-      return
-  b.log_line("upgrading packages")
-  res = chroot.run("rpm -Fvh %s" % string.join(b.files), user = "root")
-  if res != 0:
-    b.log_line("package upgrade failed")
-    return
index e1f604710ddc120a3a5bdc084335fa15236d16f3..00816ac48f904a72a184d16ecb2aefd70a9ce679 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import re
 import sys
 import os
@@ -5,43 +7,43 @@ import log
 import string
 
 def pkg_name(nvr):
-  return re.match(r"(.+)-[^-]+-[^-]+", nvr).group(1)
-  
+    return re.match(r"(.+)-[^-]+-[^-]+", nvr).group(1)
+    
 def msg(m):
-  sys.stderr.write(m)
+    sys.stderr.write(m)
 
 def sendfile(src, dst):
-  cnt = 0
-  while 1:
-    s = src.read(10000)
-    if s == "": break
-    cnt += len(s)
-    dst.write(s)
-  return cnt
+    cnt = 0
+    while 1:
+        s = src.read(10000)
+        if s == "": break
+        cnt += len(s)
+        dst.write(s)
+    return cnt
 
 def append_to(log, msg):
-  f = open(log, "a")
-  f.write("%s\n" % msg)
-  f.close()
+    f = open(log, "a")
+    f.write("%s\n" % msg)
+    f.close()
 
 def clean_tmp(dir):
-  # FIXME: use python
-  os.system("rm -f %s/* 2>/dev/null; rmdir %s 2>/dev/null" % (dir, dir))
+    # FIXME: use python
+    os.system("rm -f %s/* 2>/dev/null; rmdir %s 2>/dev/null" % (dir, dir))
 
 def uuid():
-  f = os.popen("uuidgen 2>&1")
-  u = string.strip(f.read())
-  f.close()
-  if len(u) != 36:
-    raise "uuid: fatal, cannot generate uuid: %s" % u
-  return u
+    f = os.popen("uuidgen 2>&1")
+    u = string.strip(f.read())
+    f.close()
+    if len(u) != 36:
+        raise "uuid: fatal, cannot generate uuid: %s" % u
+    return u
 
 def collect_files(log):
-  f = open(log)
-  rx = re.compile(r"^Wrote: (/home.*\.rpm)$")
-  files = []
-  for l in f.xreadlines():
-    m = rx.search(l)
-    if m:
-      files.append(m.group(1))
-  return files
+    f = open(log)
+    rx = re.compile(r"^Wrote: (/home.*\.rpm)$")
+    files = []
+    for l in f.xreadlines():
+        m = rx.search(l)
+        if m:
+            files.append(m.group(1))
+    return files
index e8e2783eb2d65a0fd286d4fd3225187093b02220..44992f98a71aceadfec6edfe3af2f6805d720a91 100644 (file)
@@ -1,3 +1,5 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
 import sys
 import log
 import traceback
@@ -11,21 +13,21 @@ import time
 import status
 
 def wrap(main):
-  try:
-    main()
-  except:
-    exctype, value = sys.exc_info()[:2]
-    if exctype == SystemExit:
-      sys.exit(value)
-    s = StringIO.StringIO()
-    traceback.print_exc(file = s, limit = 20)
-    log.alert("fatal python exception")
-    log.alert(s.getvalue())
-    log.alert("during: %s" % status.get())
-    
-    # don't use mailer.py; it safer this way
-    f = os.popen("/usr/sbin/sendmail -t", "w")
-    f.write("""Subject: builder failure
+    try:
+        main()
+    except:
+        exctype, value = sys.exc_info()[:2]
+        if exctype == SystemExit:
+            sys.exit(value)
+        s = StringIO.StringIO()
+        traceback.print_exc(file = s, limit = 20)
+        log.alert("fatal python exception")
+        log.alert(s.getvalue())
+        log.alert("during: %s" % status.get())
+        
+        # don't use mailer.py; it safer this way
+        f = os.popen("/usr/sbin/sendmail -t", "w")
+        f.write("""Subject: builder failure
 To: %s
 Cc: %s, %s
 Date: %s
@@ -34,9 +36,9 @@ X-PLD-Builder: fatal error report
 %s
 
 during: %s
-""" % (status.admin, status.email, status.builder_list, \
-       time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()), \
-       s.getvalue(), status.get()))
-    f.close()
+""" % (status.admin, status.email, status.builder_list, 
+             time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
+             s.getvalue(), status.get()))
+        f.close()
 
-    sys.exit(1)
+        sys.exit(1)
This page took 0.407582 seconds and 4 git commands to generate.