+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import ConfigParser
import string
import fnmatch
from config import config
class User:
    """A single user from acl.conf: login, GPG e-mails and privileges."""

    def __init__(self, p, login):
        """Load user data from ConfigParser `p`, section `login`.

        Privileges are normalized to "what:where:branch" form; a missing
        branch part becomes "*".  Missing gpg_emails or privs is fatal.
        """
        self.login = login
        self.privs = []
        self.gpg_emails = []
        self.mailto = ""

        if p.has_option(login, "gpg_emails"):
            self.gpg_emails = p.get(login, "gpg_emails").split()
        else:
            log.panic("acl: [%s] has no gpg_emails" % login)

        if p.has_option(login, "mailto"):
            self.mailto = p.get(login, "mailto")
        else:
            # fall back to the first GPG e-mail
            if len(self.gpg_emails) > 0:
                self.mailto = self.gpg_emails[0]

        if p.has_option(login, "privs"):
            # BUGFIX: the original reused `p` as the loop variable,
            # shadowing the ConfigParser instance.
            for priv in p.get(login, "privs").split():
                l = priv.split(":")
                if len(l) == 2:
                    # no branch given -- allow any branch
                    priv += ":*"
                if len(l) not in (2, 3) or l[0] == "" or l[1] == "":
                    log.panic("acl: invalid priv format: '%s' [%s]" % (priv, login))
                else:
                    self.privs.append(priv)
        else:
            log.panic("acl: [%s] has no privs" % login)

    def can_do(self, what, where, branch=None):
        """Return 1 if user may do `what` on builder `where` (and `branch`).

        Privs are matched in order with fnmatch patterns; a "!" prefix
        denies.  First match wins; no match means deny (0).
        """
        if branch:
            action = "%s:%s:%s" % (what, where, branch)
        else:
            action = "%s:%s:N-A" % (what, where)
        for priv in self.privs:
            if priv[0] == "!":
                ret = 0
                priv = priv[1:]
            else:
                ret = 1
            pwhat, pwhere, pbranch = priv.split(":")
            # the branch part may be a comma-separated list of patterns
            for pbranch in pbranch.split(","):
                priv = "%s:%s:%s" % (pwhat, pwhere, pbranch)
                if fnmatch.fnmatch(action, priv):
                    return ret
        return 0

    def check_priority(self, prio, where):
        """Clamp requested priority `prio` for builder `where`.

        Numeric privs ("N:builder-pattern") set a minimum priority; users
        with no matching numeric priv get at least priority 10.
        """
        for priv in self.privs:
            val, builder = priv.split(":")[0:2]
            if fnmatch.fnmatch(where, builder):
                try:
                    val = int(val)
                except ValueError:
                    # non-numeric priv (e.g. "src:...") -- not a priority rule
                    continue
                if prio >= val:
                    return prio
                else:
                    return val
        if prio < 10:
            prio = 10
        return prio

    def mail_to(self):
        """Return the user's notification address."""
        return self.mailto

    def message_to(self):
        """Return a Message addressed to the user, cc'd to the builder list."""
        m = Message()
        m.set_headers(to = self.mail_to(), cc = config.builder_list)
        return m

    def get_login(self):
        """Return the user's login name."""
        return self.login
class ACL_Conf:
    """Parsed acl.conf: maps logins and GPG e-mails to User objects."""

    def __init__(self):
        """Read acl.conf and build the users dictionary."""
        self.current_user = None
        status.push("reading acl.conf")
        p = ConfigParser.ConfigParser()
        p.readfp(open(path.acl_conf))
        self.users = {}
        for login in p.sections():
            if login in self.users:
                log.panic("acl: duplicate login: %s" % login)
                continue
            user = User(p, login)
            # index the user under every GPG e-mail as well as the login
            for e in user.gpg_emails:
                if e in self.users:
                    log.panic("acl: user email colision %s <-> %s" % \
                              (self.users[e].login, login))
                else:
                    self.users[e] = user
            self.users[login] = user
        status.pop()

    def user_by_email(self, ems):
        """Return the User owning any address in `ems`, or None."""
        for e in ems:
            if e in self.users:
                return self.users[e]
        return None

    def user(self, l):
        """Return the User for login `l`; panic when unknown."""
        if l not in self.users:
            log.panic("no such user: %s" % l)
        return self.users[l]

    def set_current_user(self, u):
        """Remember `u` as the acting user and route status mail to them."""
        self.current_user = u
        if u != None:
            status.email = u.mail_to()

    def current_user_login(self):
        """Login of the acting user, or "" when none is set."""
        if self.current_user != None:
            return self.current_user.login
        return ""
# Module-level singleton: the ACL is parsed once at import time.
acl = ACL_Conf()
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import re
import gzip
import time
import log
class B_Queue:
    """A queue of build requests stored in a (possibly gzipped and
    GPG-signed) file, with flock-based locking."""

    def __init__(self, filename):
        self.name = filename
        self.requests = []
        # lazily opened by _open()
        self.fd = None

    def dump(self, f):
        """Dump requests to `f`, newest first, without altering order."""
        self.requests.reverse()
        for r in self.requests:
            r.dump(f)
        self.requests.reverse()

    def dump_html(self, f):
        """Dump requests to `f` as a simple HTML page, newest first."""
        f.write("<html><head><title>PLD builder queue</title></head><body>\n")
        self.requests.reverse()
        for r in self.requests:
            r.dump_html(f)
        self.requests.reverse()
        f.write("</body></html>\n")

    # read possibly compressed, signed queue
    def read_signed(self):
        if re.search(r"\.gz$", self.name):
            f = gzip.open(self.name)
        else:
            f = open(self.name)
        (signers, body) = gpg.verify_sig(f)
        # BUGFIX: the input file was never closed here (fd leak)
        f.close()
        self.signers = signers
        self.requests = request.parse_requests(body)

    def _open(self):
        # open read-write, creating the file when missing
        if self.fd == None:
            if os.access(self.name, os.F_OK):
                self.fd = open(self.name, "r+")
            else:
                self.fd = open(self.name, "w+")

    def read(self):
        """Read an unsigned queue file; empty files yield an empty queue."""
        self._open()
        self.signers = []
        if self.fd.read().strip() == "":
            # empty file, don't choke
            self.requests = []
            return
        self.fd.seek(0)
        self.requests = request.parse_requests(self.fd)

    def _write_to(self, f):
        f.write("<queue>\n")
        for r in self.requests:
            r.write_to(f)
        f.write("</queue>\n")

    def write(self):
        """Rewrite the queue file in place."""
        self._open()
        self.fd.seek(0)
        self.fd.truncate(0)
        self._write_to(self.fd)
        self.fd.flush()

    def lock(self, no_block):
        """flock the queue file; returns 1 on success, 0 when it would block."""
        self._open()
        op = fcntl.LOCK_EX
        if no_block:
            op = op + fcntl.LOCK_NB
        try:
            fcntl.flock(self.fd, op)
            return 1
        except IOError:
            return 0

    def unlock(self):
        fcntl.flock(self.fd, fcntl.LOCK_UN)

    def write_signed(self, name):
        """Write a GPG-signed (and gzipped when name ends in .gz) copy."""
        sio = StringIO.StringIO()
        self._write_to(sio)
        sio.seek(0)
        sio = gpg.sign(sio)
        if os.access(name, os.F_OK):
            os.unlink(name)
        if re.search(r"\.gz$", name):
            f = gzip.open(name, "w", 6)
        else:
            f = open(name, "w")
        util.sendfile(sio, f)
        f.close()

    def add(self, req):
        self.requests.append(req)

    def value(self):
        return self.requests
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import string
import os
import atexit
def run_command(batch):
    """Execute batch.command; return 0 on success, the exit status otherwise.

    "no-chroot" commands run directly on the host; everything else runs
    inside the chroot, as root unless "as-builder" is flagged.
    """
    if "no-chroot" not in batch.command_flags:
        user = "root"
        if "as-builder" in batch.command_flags:
            user = "builder"
        return chroot.run(batch.command, logfile = batch.logfile, user = user)
    cmd = "%s >> %s 2>&1" % (batch.command, batch.logfile)
    pipe = os.popen(cmd)
    # drain the pipe so the child can finish
    for _ in pipe:
        pass
    rc = pipe.close()
    if rc == None:
        return 0
    return rc
def build_all(r, build_fnc):
    """Process every batch of request `r`.

    Commands are run with run_command(); specs are built with
    `build_fnc(r, batch)`; batches whose dependencies failed are skipped.
    Notifications and buildlogs are emitted along the way.
    """
    status.email = r.requester_email
    notify.begin(r)
    # per-request scratch directory, cleaned up at exit
    tmp = path.spool_dir + util.uuid() + "/"
    r.tmp_dir = tmp
    os.mkdir(tmp)
    atexit.register(util.clean_tmp, tmp)

    log.notice("started processing %s" % r.id)
    r.chroot_files = []
    r.some_ok = 0
    for batch in r.batches:
        # a batch is buildable only when none of its dependencies failed
        can_build = 1
        failed_dep = ""
        for dep in batch.depends_on:
            if dep.build_failed:
                can_build = 0
                failed_dep = dep.spec

        if batch.is_command() and can_build:
            batch.logfile = tmp + "command"
            if config.builder in batch.builders:
                log.notice("running %s" % batch.command)
                stopwatch.start()
                batch.build_failed = run_command(batch)
                if batch.build_failed:
                    log.notice("running %s FAILED" % batch.command)
                    notify.add_batch(batch, "FAIL")
                else:
                    r.some_ok = 1
                    log.notice("running %s OK" % batch.command)
                    notify.add_batch(batch, "OK")
                batch.build_time = stopwatch.stop()
                report.add_pld_builder_info(batch)
                buildlogs.add(batch.logfile, failed = batch.build_failed)
            else:
                # command targeted at other builders; count it as OK here
                log.notice("not running command, not for me.")
                batch.build_failed = 0
                batch.log_line("queued command %s for other builders" % batch.command)
                r.some_ok = 1
                buildlogs.add(batch.logfile, failed = batch.build_failed)
        elif can_build:
            log.notice("building %s" % batch.spec)
            stopwatch.start()
            batch.logfile = tmp + batch.spec + ".log"
            batch.gb_id = r.id
            batch.requester = r.requester
            batch.requester_email = r.requester_email
            batch.build_failed = build_fnc(r, batch)
            if batch.build_failed:
                log.notice("building %s FAILED" % batch.spec)
                notify.add_batch(batch, "FAIL")
            else:
                r.some_ok = 1
                log.notice("building %s OK" % batch.spec)
                notify.add_batch(batch, "OK")
            batch.build_time = stopwatch.stop()
            report.add_pld_builder_info(batch)
            buildlogs.add(batch.logfile, failed = batch.build_failed)
        else:
            # a dependency failed -- skip this batch entirely
            batch.build_failed = 1
            batch.skip_reason = "SKIPED [%s failed]" % failed_dep
            batch.logfile = None
            batch.build_time = ""
            log.notice("building %s %s" % (batch.spec, batch.skip_reason))
            notify.add_batch(batch, "SKIP")

    buildlogs.flush()
    chroot.run("rm -f %s" % string.join(r.chroot_files))
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import path
import time
import os
import util
class Buildlogs_Queue:
    """Collects bzip2-compressed buildlogs for shipment to the log server."""

    def __init__(self):
        self.queue = []
        self.some_failed = 0

    def init(self, g):
        # remember who requested the build; recorded in the .desc files
        self.requester_email = g.requester_email

    def add(self, logfile, failed):
        """Compress `logfile` into the queue dir and record its status."""
        # if /dev/null, don't even bother to store it
        if config.buildlogs_url == "/dev/null":
            return
        name = re.sub(r"\.spec\.log", "", os.path.basename(logfile)) + ".bz2"
        id = util.uuid()
        os.system("bzip2 --best --force < %s > %s" \
            % (logfile, path.buildlogs_queue_dir + id))

        if failed:
            s = "FAIL"
        else:
            s = "OK"
        f = open(path.buildlogs_queue_dir + id + ".info", "w")
        f.write("Status: %s\nEND\n" % s)
        f.close()

        self.queue.append({'name': name, 'id': id, 'failed': failed})

    def flush(self):
        """Write a .desc file for every queued buildlog."""
        def desc(l):
            return """Target: %s/%s
Builder: %s
Time: %d
Type: buildlog
Requester: %s
END
""" % (config.buildlogs_url, l['name'], config.builder, time.time(), self.requester_email)

        for entry in self.queue:
            f = open(path.buildlogs_queue_dir + entry['id'] + ".desc", "w")
            f.write(desc(entry))
            f.close()
# Module-level queue instance used by the convenience wrappers below.
queue = Buildlogs_Queue()

def init(r):
    "Bind the buildlog queue to request r."
    queue.init(r)

def add(logfile, failed):
    "Add new buildlog with specified status."
    queue.add(logfile, failed)

def flush():
    "Send buildlogs to server."
    queue.flush()
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import os
import re
from config import config
def quote(cmd):
    """Backslash-escape characters special inside double quotes: " \\ $ `."""
    return re.sub(r'(["\\$`])', r"\\\1", cmd)
+
def command(cmd, user = None):
    """Build the shell line that runs `cmd` inside the chroot as `user`
    (default config.builder_user), with LC_ALL=C."""
    effective_user = user
    if effective_user == None:
        effective_user = config.builder_user
    escaped = quote(cmd)
    return "%s sudo chroot %s su - %s -c \"export LC_ALL=C; %s\"" \
        % (config.sudo_chroot_wrapper, config.chroot, effective_user, escaped)
+
def command_sh(cmd):
    """Build the shell line that runs `cmd` in the chroot via /bin/sh (root)."""
    escaped = quote(cmd)
    return "%s sudo chroot %s /bin/sh -c \"export LC_ALL=C; %s\"" \
        % (config.sudo_chroot_wrapper, config.chroot, escaped)
def popen(cmd, user = "builder", mode = "r"):
    """Open a pipe to `cmd` running inside the chroot."""
    return os.popen(command(cmd, user), mode)
+
def run(cmd, user = "builder", logfile = None):
    """Run `cmd` in the chroot, optionally appending output to `logfile`.

    Returns 0 on success, the popen exit status otherwise.
    """
    full = command(cmd, user)
    if logfile != None:
        full = "%s >> %s 2>&1" % (full, logfile)
    pipe = os.popen(full)
    # drain output so the child can finish
    for _ in pipe:
        pass
    rc = pipe.close()
    if rc == None:
        return 0
    return rc
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import ConfigParser
import string
import os
# Map facility names (as written in builder.conf) to syslog constants.
syslog_facilities = {
    'kern': syslog.LOG_KERN,
    'user': syslog.LOG_USER,
    'mail': syslog.LOG_MAIL,
    'daemon': syslog.LOG_DAEMON,
    'auth': syslog.LOG_AUTH,
    'lpr': syslog.LOG_LPR,
    'news': syslog.LOG_NEWS,
    'uucp': syslog.LOG_UUCP,
    'cron': syslog.LOG_CRON,
    'local0': syslog.LOG_LOCAL0,
    'local1': syslog.LOG_LOCAL1,
    'local2': syslog.LOG_LOCAL2,
    'local3': syslog.LOG_LOCAL3,
    'local4': syslog.LOG_LOCAL4,
    'local5': syslog.LOG_LOCAL5,
    'local6': syslog.LOG_LOCAL6,
    'local7': syslog.LOG_LOCAL7,
}
class Builder_Conf:
    """Configuration of this builder instance, read from builder.conf."""

    def __init__(self):
        # set to 1 once read() has loaded a concrete builder section
        self.done = 0

    def read(self, builder):
        """Load settings for section `builder` ("all" loads only the
        common part; "src" may be redirected via src_builder)."""
        p = ConfigParser.ConfigParser()

        def get(o, d = None):
            # lookup order: [builder] section, [all] section, default `d`
            if p.has_option(builder, o):
                return p.get(builder, o).strip()
            elif p.has_option("all", o):
                return p.get("all", o).strip()
            elif d != None:
                return d
            else:
                log.panic("cannot find %s::%s" % (builder, o))

        p.readfp(open(path.builder_conf))

        if p.has_option("all", "syslog"):
            f = p.get("all", "syslog")
            if f != "":
                if f in syslog_facilities:
                    log.open_syslog("builder", syslog_facilities[f])
                else:
                    log.panic("no such syslog facility: %s" % f)

        if builder == "src":
            builder = get("src_builder", builder)
        self.builder = builder

        self.binary_builders = get("binary_builders").split()
        self.tag_prefixes = get("tag_prefixes", "").split()
        self.bot_email = get("bot_email", "")
        self.control_url = get("control_url")
        self.notify_email = get("notify_email")
        self.admin_email = get("admin_email")
        self.builder_list = get("builder_list", "")
        status.admin = self.admin_email
        status.builder_list = self.builder_list
        self.email = self.admin_email

        if builder == "all":
            return

        if builder not in p.sections():
            log.panic("builder %s not in config file" % builder)
        self.arch = get("arch")
        self.chroot = get("chroot")
        self.email = get("email")
        self.buildlogs_url = get("buildlogs_url")
        self.ftp_url = get("ftp_url")
        self.job_slots = int(get("job_slots"))
        self.max_load = float(get("max_load"))
        self.control_url = get("control_url")
        self.builder_user = get("builder_user", "builder")
        self.sudo_chroot_wrapper = get("sudo_chroot_wrapper", "")
        self.nice = get("nice", "0")

        # per-builder syslog facility overrides the global one
        f = get("syslog", "")
        if f != "":
            if f in syslog_facilities:
                log.open_syslog(self.builder, syslog_facilities[f])
            else:
                log.panic("no such syslog facility: %s" % f)

        self.done = 1
# Global builder configuration; populated by init_conf().
config = Builder_Conf()
def init_conf(builder):
    """Initialize the global `config` for `builder` ("" means "all")."""
    os.environ['LC_ALL'] = "C"
    status.push("reading builder config")
    log.builder = builder
    if builder == "":
        builder = "all"
    config.read(builder)
    log.builder = config.builder
    status.pop()
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import string
from chroot import *
from util import *
__all__ = ['compute_deps', 'remove_list']
def compute_deps():
    """Compute dependenecies between RPM installed on system.

    Return dictionary from name of package to list of packages required by it.
    Produce some warnings and progress information to stderr.
    """
    # pkg-name -> list of stuff returned by rpm -qR
    rpm_req = {}
    # --whatprovides ...
    rpm_prov = {}
    # list of required files
    req_files = {}

    def get_req():
        # collect REQUIRENAME entries for every installed package
        msg("rpm-req... ")
        f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{REQUIRENAME}\n]'")
        cur_pkg = None
        while 1:
            l = f.readline()
            if l == "": break
            l = string.strip(l)
            if l == "@":
                cur_pkg = string.strip(f.readline())
                rpm_req[cur_pkg] = []
                continue
            rpm_req[cur_pkg].append(l)
            if l[0] == '/':
                req_files[l] = 1
        f.close()
        msg("done\n")

    def add_provides(pkg, what):
        # warn when two packages provide the same capability
        if rpm_prov.has_key(what):
            msg("[%s: %s, %s] " % (what, rpm_prov[what], pkg))
        else:
            rpm_prov[what] = pkg

    def get_prov():
        msg("rpm-prov... ")
        f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{PROVIDENAME}\n]'")
        cur_pkg = None
        while 1:
            l = f.readline()
            if l == "": break
            l = string.strip(l)
            if l == "@":
                cur_pkg = string.strip(f.readline())
                continue
            add_provides(cur_pkg, l)
            if l[0] == '/':
                # already provided; BUGFIX: only discharge the file if some
                # package actually required it -- the unconditional
                # `del req_files[l]` raised KeyError otherwise
                if req_files.has_key(l):
                    del req_files[l]
        f.close()
        msg("done\n")

    def get_prov_files():
        # resolve remaining required files against package file lists
        msg("rpm-files... ")
        f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{FILENAMES}\n]'")
        cur_pkg = None
        while 1:
            l = f.readline()
            if l == "": break
            l = string.strip(l)
            if l == "@":
                cur_pkg = string.strip(f.readline())
                continue
            if req_files.has_key(l):
                add_provides(cur_pkg, l)
        f.close()
        msg("done\n")

    def compute():
        # translate capability-level requires into package-level requires
        msg("computing deps... ")
        for pkg, reqs in rpm_req.items():
            pkg_reqs = []
            for req in reqs:
                if req[0:7] == "rpmlib(": continue
                if rpm_prov.has_key(req):
                    if rpm_prov[req] not in pkg_reqs:
                        pkg_reqs.append(rpm_prov[req])
                else:
                    msg("[%s: %s] " % (pkg, req))
            requires[pkg] = pkg_reqs
        msg("done\n")

    # map from pkg-name to list of pkg-names required by it
    # this is result
    requires = {}

    get_req()
    get_prov()
    get_prov_files()
    compute()
    return requires
def remove_list(req, need):
    """List of packages scheduled for removal.

    Given dependency information and list of needed packages compute list
    of packages that don't need to be present.
    """
    needed = {}
    # transitively mark everything reachable from `need` (iterative DFS)
    stack = list(need)
    while stack:
        n = stack.pop()
        if n in needed:
            continue
        needed[n] = 1
        for k in req.get(n, []):
            stack.append(k)
    return [p for p in req.keys() if p not in needed]
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import glob
import re
import string
# Retry back-off schedule in seconds: 5 min, 15 min, 1 h, 2 h, 5 h.
retries_times = [5 * 60, 15 * 60, 60 * 60, 2 * 60 * 60, 5 * 60 * 60]
def read_name_val(file):
    """Parse a "Name: value" description file terminated by an "END" line.

    Returns a dict of the parsed values plus '_file' (the described data
    file, i.e. the name minus its 5-char ".desc" suffix) and '_desc'
    (the description file itself), or None when the file is malformed
    or unterminated.
    """
    f = open(file)
    r = {'_file': file[:-5], '_desc': file}
    rx = re.compile(r"^([^:]+)\s*:(.*)$")
    for l in f:
        if l == "END\n":
            f.close()
            return r
        m = rx.search(l)
        if m is None:
            # malformed line -- reject the whole file
            break
        r[m.group(1)] = m.group(2).strip()
    f.close()
    return None
def scp_file(src, target):
    """Copy `src` to `target` with scp, stashing its output in `problem`.

    Returns None on success, the non-None exit status on failure.
    """
    global problem
    pipe = os.popen("scp -v -B -p %s %s 2>&1 < /dev/null" % (src, target))
    problem = pipe.read()
    return pipe.close()
def copy_file(src, target):
    """Copy `src` to `target`; return 0 on success, 1 on failure.

    On failure the error text is stored in the global `problem`.
    """
    try:
        shutil.copyfile(src, target)
        return 0
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt
        global problem
        exctype, value = sys.exc_info()[:2]
        problem = "cannot copy file: %s" % traceback.format_exception_only(exctype, value)
        return 1
def rsync_file(src, target, host):
- global problem
- p = open(path.rsync_password_file, "r")
- password = None
- for l in p.xreadlines():
- l = string.split(l)
- if len(l) >= 2 and l[0] == host:
- password = l[1]
- p.close()
- rsync = "rsync --verbose --archive"
- if password != None:
- p = open(".rsync.pass", "w")
- os.chmod(".rsync.pass", 0600)
- p.write("%s\n" % password)
+ global problem
+ p = open(path.rsync_password_file, "r")
+ password = None
+ for l in p.xreadlines():
+ l = string.split(l)
+ if len(l) >= 2 and l[0] == host:
+ password = l[1]
p.close()
- rsync += " --password-file .rsync.pass"
- f = os.popen("%s %s %s 2>&1 < /dev/null" % (rsync, src, target))
- problem = f.read()
- res = f.close()
- if password != None: os.unlink(".rsync.pass")
- return f.close()
-
+ rsync = "rsync --verbose --archive"
+ if password != None:
+ p = open(".rsync.pass", "w")
+ os.chmod(".rsync.pass", 0600)
+ p.write("%s\n" % password)
+ p.close()
+ rsync += " --password-file .rsync.pass"
+ f = os.popen("%s %s %s 2>&1 < /dev/null" % (rsync, src, target))
+ problem = f.read()
+ res = f.close()
+ if password != None: os.unlink(".rsync.pass")
+ return f.close()
+
def send_file(src, target):
    """Dispatch `src` to `target` by scheme: rsync://, absolute path, scp://.

    Returns a false-ish value on success.  Unknown schemes are logged and
    reported as success so the entry is dropped from the queue.
    """
    log.notice("sending %s" % target)
    hit = re.match('rsync://([^/]+)/.*', target)
    if hit:
        return rsync_file(src, target, host = hit.group(1))
    if target != "" and target[0] == '/':
        return copy_file(src, target)
    hit = re.match('scp://([^@:]+@[^/:]+)(:|)(.*)', target)
    if hit:
        return scp_file(src, hit.group(1) + ":" + hit.group(3))
    log.alert("unsupported protocol: %s" % target)
    # pretend everything went OK, so file is removed from queue,
    # and doesn't cause any additional problems
    return 0
def maybe_flush_queue(dir):
- retry_delay = 0
- try:
- f = open(dir + "retry-at")
- last_retry = int(string.strip(f.readline()))
- retry_delay = int(string.strip(f.readline()))
- f.close()
- if last_retry + retry_delay > time.time():
- return
- os.unlink(dir + "retry-at")
- except:
- pass
-
- status.push("flushing %s" % dir)
+ retry_delay = 0
+ try:
+ f = open(dir + "retry-at")
+ last_retry = int(string.strip(f.readline()))
+ retry_delay = int(string.strip(f.readline()))
+ f.close()
+ if last_retry + retry_delay > time.time():
+ return
+ os.unlink(dir + "retry-at")
+ except:
+ pass
+
+ status.push("flushing %s" % dir)
- if flush_queue(dir):
- f = open(dir + "retry-at", "w")
- if retry_delay in retries_times:
- idx = retries_times.index(retry_delay)
- if idx < len(retries_times) - 1: idx += 1
- else:
- idx = 0
- f.write("%d\n%d\n" % (time.time(), retries_times[idx]))
- f.close()
+ if flush_queue(dir):
+ f = open(dir + "retry-at", "w")
+ if retry_delay in retries_times:
+ idx = retries_times.index(retry_delay)
+ if idx < len(retries_times) - 1: idx += 1
+ else:
+ idx = 0
+ f.write("%d\n%d\n" % (time.time(), retries_times[idx]))
+ f.close()
- status.pop()
+ status.pop()
def flush_queue(dir):
- q = []
- os.chdir(dir)
- for f in glob.glob(dir + "/*.desc"):
- d = read_name_val(f)
- if d != None: q.append(d)
- def mycmp(x, y):
- rc = cmp(x['Time'], y['Time'])
- if (rc == 0):
- return cmp(x['Type'], y['Type'])
- else:
- return rc
- q.sort(mycmp)
-
- error = None
- remaining = q
- for d in q:
- if send_file(d['_file'], d['Target']):
- error = d
- break
- if os.access(d['_file'] + ".info", os.F_OK):
- if send_file(d['_file'] + ".info", d['Target'] + ".info"):
- error = d
- break
- os.unlink(d['_file'])
- os.unlink(d['_desc'])
- remaining = q[1:]
+ q = []
+ os.chdir(dir)
+ for f in glob.glob(dir + "/*.desc"):
+ d = read_name_val(f)
+ if d != None: q.append(d)
+ def mycmp(x, y):
+ rc = cmp(x['Time'], y['Time'])
+ if (rc == 0):
+ return cmp(x['Type'], y['Type'])
+ else:
+ return rc
+ q.sort(mycmp)
- if error != None:
- emails = {}
- emails[config.admin_email] = 1
- for d in remaining:
- if d.has_key('Requester'):
- emails[d['Requester']] = 1
- e = emails.keys()
- m = mailer.Message()
- m.set_headers(to = string.join(e, ", "),
- subject = "builder queue problem")
- m.write("there were problems sending files from queue %s:\n" % dir)
- m.write("problem: %s\n" % problem)
- m.send()
- log.error("error sending files from %s: %s" % (dir, problem))
- return 1
+ error = None
+ remaining = q
+ for d in q:
+ if send_file(d['_file'], d['Target']):
+ error = d
+ break
+ if os.access(d['_file'] + ".info", os.F_OK):
+ if send_file(d['_file'] + ".info", d['Target'] + ".info"):
+ error = d
+ break
+ os.unlink(d['_file'])
+ os.unlink(d['_desc'])
+ remaining = q[1:]
+
+ if error != None:
+ emails = {}
+ emails[config.admin_email] = 1
+ for d in remaining:
+ if d.has_key('Requester'):
+ emails[d['Requester']] = 1
+ e = emails.keys()
+ m = mailer.Message()
+ m.set_headers(to = string.join(e, ", "),
+ subject = "builder queue problem")
+ m.write("there were problems sending files from queue %s:\n" % dir)
+ m.write("problem: %s\n" % problem)
+ m.send()
+ log.error("error sending files from %s: %s" % (dir, problem))
+ return 1
- return 0
+ return 0
problem = ""
def main():
- if lock.lock("sending-files", non_block = 1) == None:
- return
- init_conf("")
- maybe_flush_queue(path.buildlogs_queue_dir)
- maybe_flush_queue(path.ftp_queue_dir)
+ if lock.lock("sending-files", non_block = 1) == None:
+ return
+ init_conf("")
+ maybe_flush_queue(path.buildlogs_queue_dir)
+ maybe_flush_queue(path.ftp_queue_dir)
if __name__ == '__main__':
- loop.run_loop(main)
+ loop.run_loop(main)
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import path
import os
import shutil
import util
class FTP_Queue:
- def __init__(self):
- self.queue = None
- self.some_failed = 0
- self.status = ""
+ def __init__(self):
+ self.queue = None
+ self.some_failed = 0
+ self.status = ""
- def init(self, g):
- self.queue = []
- self.requester_email = g.requester_email
- self.url = config.ftp_url
-
- def add(self, file, type):
- # if /dev/null, say bye bye
- if self.url == "/dev/null":
- return
- name = os.path.basename(file)
- id = util.uuid()
- shutil.copy(file, path.ftp_queue_dir + id)
- self.queue.append({'name': name, 'id': id, 'type': type})
- st = os.stat(path.ftp_queue_dir + id)
- self.status += "%10d %s\n" % (st.st_size, name)
+ def init(self, g):
+ self.queue = []
+ self.requester_email = g.requester_email
+ self.url = config.ftp_url
+
+ def add(self, file, type):
+ # if /dev/null, say bye bye
+ if self.url == "/dev/null":
+ return
+ name = os.path.basename(file)
+ id = util.uuid()
+ shutil.copy(file, path.ftp_queue_dir + id)
+ self.queue.append({'name': name, 'id': id, 'type': type})
+ st = os.stat(path.ftp_queue_dir + id)
+ self.status += "%10d %s\n" % (st.st_size, name)
- def flush(self):
- def desc(l):
- return """Target: %s/%s
+ def flush(self):
+ def desc(l):
+ return """Target: %s/%s
Builder: %s
Time: %d
Type: %s
Requester: %s
END
""" % (self.url, l['name'], config.builder, time.time(), l['type'], self.requester_email)
-
- for l in self.queue:
- f = open(path.ftp_queue_dir + l['id'] + ".desc", "w")
- f.write(desc(l))
- f.close()
+
+ for l in self.queue:
+ f = open(path.ftp_queue_dir + l['id'] + ".desc", "w")
+ f.write(desc(l))
+ f.close()
- def kill(self):
- for l in self.queue:
- os.unlink(path.ftp_queue_dir + l)
+ def kill(self):
+ for l in self.queue:
+ os.unlink(path.ftp_queue_dir + l)
queue = FTP_Queue()
def add(f, type="rpm"):
- queue.add(f, type)
+ queue.add(f, type)
def flush():
- queue.flush()
-
+ queue.flush()
+
def kill():
- queue.kill()
+ queue.kill()
def init(r):
- queue.init(r)
+ queue.init(r)
def status():
- return queue.status
-
+ return queue.status
+
def clear_status():
- queue.status = ""
+ queue.status = ""
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import re
import string
import xreadlines
def get_build_requires(spec, bconds_with, bconds_without):
- cond_rx = re.compile(r"%\{(\!\?|\?\!|\?)([a-zA-Z0-9_+]+)\s*:([^%\{\}]*)\}")
-
- def expand_conds(l):
- def expand_one(m):
- if m.group(1) == "?":
- if macros.has_key(m.group(2)):
- return m.group(3)
- else:
- if not macros.has_key(m.group(2)):
- return m.group(3)
- return ""
+ cond_rx = re.compile(r"%\{(\!\?|\?\!|\?)([a-zA-Z0-9_+]+)\s*:([^%\{\}]*)\}")
- for i in range(10):
- l = cond_rx.sub(expand_one, l)
- if len(l) > 1000: break
-
- return l
-
- macro_rx = re.compile(r"%\{([a-zA-Z0-9_+]+)\}")
- def expand_macros(l):
- def expand_one(m):
- if macros.has_key(m.group(1)):
- return string.strip(macros[m.group(1)])
- else:
- return m.group(0) # don't change
+ def expand_conds(l):
+ def expand_one(m):
+ if m.group(1) == "?":
+ if macros.has_key(m.group(2)):
+ return m.group(3)
+ else:
+ if not macros.has_key(m.group(2)):
+ return m.group(3)
+ return ""
- for i in range(10):
- l = macro_rx.sub(expand_one, l)
- if len(l) > 1000: break
-
- return expand_conds(l)
-
- simple_br_rx = re.compile(r"^BuildRequires\s*:\s*([^\s]+)", re.I)
- bcond_rx = re.compile(r"^%bcond_(with|without)\s+([^\s]+)")
- version_rx = re.compile(r"^Version\s*:\s*([^\s]+)", re.I)
- release_rx = re.compile(r"^Release\s*:\s*([^\s]+)", re.I)
- name_rx = re.compile(r"^Name\s*:\s*([^\s]+)", re.I)
- define_rx = re.compile(r"^\%define\s+([a-zA-Z0-9_+]+)\s+(.*)", re.I)
- any_br_rx = re.compile(r"BuildRequires", re.I)
-
- macros = {}
- for b in bconds_with:
- macros["_with_%s" % b] = 1
- for b in bconds_without:
- macros["_without_%s" % b] = 1
+ for i in range(10):
+ l = cond_rx.sub(expand_one, l)
+ if len(l) > 1000: break
- macros["__perl"] = "/usr/bin/perl"
- macros["_bindir"] = "/usr/bin"
- macros["_sbindir"] = "/usr/sbin"
- macros["kgcc_package"] = "gcc"
+ return l
- build_req = []
+ macro_rx = re.compile(r"%\{([a-zA-Z0-9_+]+)\}")
+ def expand_macros(l):
+ def expand_one(m):
+ if macros.has_key(m.group(1)):
+ return string.strip(macros[m.group(1)])
+ else:
+ return m.group(0) # don't change
+
+ for i in range(10):
+ l = macro_rx.sub(expand_one, l)
+ if len(l) > 1000: break
+
+ return expand_conds(l)
- f = open(spec)
- for l in xreadlines.xreadlines(f):
- l = string.strip(l)
- if l == "%changelog": break
+ simple_br_rx = re.compile(r"^BuildRequires\s*:\s*([^\s]+)", re.I)
+ bcond_rx = re.compile(r"^%bcond_(with|without)\s+([^\s]+)")
+ version_rx = re.compile(r"^Version\s*:\s*([^\s]+)", re.I)
+ release_rx = re.compile(r"^Release\s*:\s*([^\s]+)", re.I)
+ name_rx = re.compile(r"^Name\s*:\s*([^\s]+)", re.I)
+ define_rx = re.compile(r"^\%define\s+([a-zA-Z0-9_+]+)\s+(.*)", re.I)
+ any_br_rx = re.compile(r"BuildRequires", re.I)
- # %bcond_with..
- m = bcond_rx.search(l)
- if m:
- bcond = m.group(2)
- if m.group(1) == "with":
- if macros.has_key("_with_%s" % bcond):
- macros["with_%s" % bcond] = 1
- else:
- if not macros.has_key("_without_%s" % bcond):
- macros["with_%s" % bcond] = 1
- continue
-
- # name,version,release
- m = version_rx.search(l)
- if m: macros["version"] = m.group(1)
- m = release_rx.search(l)
- if m: macros["release"] = m.group(1)
- m = name_rx.search(l)
- if m: macros["name"] = m.group(1)
+ macros = {}
+ for b in bconds_with:
+ macros["_with_%s" % b] = 1
+ for b in bconds_without:
+ macros["_without_%s" % b] = 1
+
+ macros["__perl"] = "/usr/bin/perl"
+ macros["_bindir"] = "/usr/bin"
+ macros["_sbindir"] = "/usr/sbin"
+ macros["kgcc_package"] = "gcc"
- # %define
- m = define_rx.search(l)
- if m: macros[m.group(1)] = m.group(2)
+ build_req = []
+
+ f = open(spec)
+ for l in xreadlines.xreadlines(f):
+ l = string.strip(l)
+ if l == "%changelog": break
+
+ # %bcond_with..
+ m = bcond_rx.search(l)
+ if m:
+ bcond = m.group(2)
+ if m.group(1) == "with":
+ if macros.has_key("_with_%s" % bcond):
+ macros["with_%s" % bcond] = 1
+ else:
+ if not macros.has_key("_without_%s" % bcond):
+ macros["with_%s" % bcond] = 1
+ continue
- # *BuildRequires*
- if any_br_rx.search(l):
- l = expand_macros(l)
- m = simple_br_rx.search(l)
- if m:
- build_req.append(m.group(1))
- else:
- if l <> "" and l[0] <> '#':
- msg("spec error (%s): %s\n" % (spec, l))
+ # name,version,release
+ m = version_rx.search(l)
+ if m: macros["version"] = m.group(1)
+ m = release_rx.search(l)
+ if m: macros["release"] = m.group(1)
+ m = name_rx.search(l)
+ if m: macros["name"] = m.group(1)
+
+ # %define
+ m = define_rx.search(l)
+ if m: macros[m.group(1)] = m.group(2)
+
+ # *BuildRequires*
+ if any_br_rx.search(l):
+ l = expand_macros(l)
+ m = simple_br_rx.search(l)
+ if m:
+ build_req.append(m.group(1))
+ else:
+ if l <> "" and l[0] <> '#':
+ msg("spec error (%s): %s\n" % (spec, l))
- for x in build_req:
- print x
+ for x in build_req:
+ print x
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import popen2
import re
import StringIO
import pipeutil
def verify_sig(buf):
- """Check signature.
-
- Given email as file-like object, return (signer-emails, signed-body).
- where signer-emails is lists of strings, and signed-body is StringIO
- object.
- """
- (gpg_out, gpg_in, gpg_err) = popen2.popen3("gpg --batch --no-tty --decrypt")
- body = pipeutil.rw_pipe(buf, gpg_in, gpg_out)
- rx = re.compile("^gpg: Good signature from .*<([^>]+)>")
- emails = []
- for l in gpg_err.xreadlines():
- m = rx.match(l)
- if m:
- emails.append(m.group(1))
- gpg_err.close()
- return (emails, body)
+ """Check signature.
+
+ Given email as file-like object, return (signer-emails, signed-body).
+ where signer-emails is lists of strings, and signed-body is StringIO
+ object.
+ """
+ (gpg_out, gpg_in, gpg_err) = popen2.popen3("gpg --batch --no-tty --decrypt")
+ body = pipeutil.rw_pipe(buf, gpg_in, gpg_out)
+ rx = re.compile("^gpg: Good signature from .*<([^>]+)>")
+ emails = []
+ for l in gpg_err.xreadlines():
+ m = rx.match(l)
+ if m:
+ emails.append(m.group(1))
+ gpg_err.close()
+ return (emails, body)
def sign(buf):
- (gpg_out, gpg_in, gpg_err) = popen2.popen3("gpg --batch --no-tty --clearsign")
- body = pipeutil.rw_pipe(buf, gpg_in, gpg_out)
- gpg_err.close()
- return body
+ (gpg_out, gpg_in, gpg_err) = popen2.popen3("gpg --batch --no-tty --clearsign")
+ body = pipeutil.rw_pipe(buf, gpg_in, gpg_out)
+ gpg_err.close()
+ return body
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import re
import string
import chroot
def install_br(r, b):
- cmd = "cd rpm/SPECS; TMPDIR=$HOME/%s rpmbuild --nobuild %s %s 2>&1" \
- % (b.b_id, b.bconds_string(), b.spec)
- f = chroot.popen(cmd)
- rx = re.compile(r"^\s*([^\s]+) .*is needed by")
- needed = {}
- b.log_line("checking BR")
- for l in f.xreadlines():
- b.log_line("rpm: %s" % l)
- m = rx.search(l)
- if m: needed[m.group(1)] = 1
- f.close()
- if len(needed) == 0:
- b.log_line("no BR needed")
- return
- nbr = ""
- for bre in needed.keys():
- nbr = nbr + " " + re.escape(bre)
- br = string.strip(nbr)
- b.log_line("installing BR: %s" % br)
- res = chroot.run("poldek --up; poldek --upa; poldek --unique-pkg-names -v --upgrade %s" % br,
- user = "root",
- logfile = b.logfile)
- if res != 0:
- b.log_line("error: BR installation failed")
- return res
+ cmd = "cd rpm/SPECS; TMPDIR=$HOME/%s rpmbuild --nobuild %s %s 2>&1" \
+ % (b.b_id, b.bconds_string(), b.spec)
+ f = chroot.popen(cmd)
+ rx = re.compile(r"^\s*([^\s]+) .*is needed by")
+ needed = {}
+ b.log_line("checking BR")
+ for l in f.xreadlines():
+ b.log_line("rpm: %s" % l)
+ m = rx.search(l)
+ if m: needed[m.group(1)] = 1
+ f.close()
+ if len(needed) == 0:
+ b.log_line("no BR needed")
+ return
+ nbr = ""
+ for bre in needed.keys():
+ nbr = nbr + " " + re.escape(bre)
+ br = string.strip(nbr)
+ b.log_line("installing BR: %s" % br)
+ res = chroot.run("poldek --up; poldek --upa; poldek --unique-pkg-names -v --upgrade %s" % br,
+ user = "root",
+ logfile = b.logfile)
+ if res != 0:
+ b.log_line("error: BR installation failed")
+ return res
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import string
import os
import time
# it is determined based upon spool/got_lock file, which is also
# updated to be short
def builders_order():
- bs = {}
- bl = []
- for b in config.binary_builders:
- bs[b] = 0
- bl.append(b)
+ bs = {}
+ bl = []
+ for b in config.binary_builders:
+ bs[b] = 0
+ bl.append(b)
+
+ lck = lock.lock("got-lock")
+ f = open(path.got_lock_file, "r+")
+ line_no = 0
- lck = lock.lock("got-lock")
- f = open(path.got_lock_file, "r+")
- line_no = 0
-
- for l in f.xreadlines():
- line_no += 1
- b = string.strip(l)
- if bs.has_key(b):
- bs[b] = line_no
- else:
- log.alert("found strange lock in got-lock: %s" % b)
+ for l in f.xreadlines():
+ line_no += 1
+ b = string.strip(l)
+ if bs.has_key(b):
+ bs[b] = line_no
+ else:
+ log.alert("found strange lock in got-lock: %s" % b)
- def mycmp(b1, b2):
- return cmp(bs[b1], bs[b2])
-
- bl.sort(mycmp)
+ def mycmp(b1, b2):
+ return cmp(bs[b1], bs[b2])
+
+ bl.sort(mycmp)
- f.seek(0)
- f.truncate(0)
- for l in bl: f.write(l + "\n")
- f.close()
- lck.close()
+ f.seek(0)
+ f.truncate(0)
+ for l in bl: f.write(l + "\n")
+ f.close()
+ lck.close()
- return bl
+ return bl
def run_rpm_builder(b):
- if os.fork() == 0:
- return
- else:
- rpm_builder.main_for(b)
- sys.exit(0)
+ if os.fork() == 0:
+ return
+ else:
+ rpm_builder.main_for(b)
+ sys.exit(0)
def main():
- init_conf("")
- for b in builders_order():
- run_rpm_builder(b)
- # give builder some time to aquire lock
- time.sleep(1)
- # wait for children to die out
- try:
- while 1: os.wait()
- except:
- pass
+ init_conf("")
+ for b in builders_order():
+ run_rpm_builder(b)
+ # give builder some time to acquire lock
+ time.sleep(1)
+ # wait for children to die out
+ try:
+ while 1: os.wait()
+ except:
+ pass
if __name__ == '__main__':
- loop.run_loop(main)
+ loop.run_loop(main)
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import fcntl
import path
locks_list = []
def lock(n, non_block = 0):
- f = open(path.lock_dir + n, "a")
- # blah, otherwise it gets garbage collected and doesn't work
- locks_list.append(f)
- if non_block:
- try:
- fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except:
- f.close()
- return None
- else:
- fcntl.flock(f, fcntl.LOCK_EX)
- return f
+ f = open(path.lock_dir + n, "a")
+ # blah, otherwise it gets garbage collected and doesn't work
+ locks_list.append(f)
+ if non_block:
+ try:
+ fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except:
+ f.close()
+ return None
+ else:
+ fcntl.flock(f, fcntl.LOCK_EX)
+ return f
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import sys
import time
import syslog
do_syslog = 0
def log(p, s):
- if do_syslog:
- try:
- syslog.syslog(p, str(s))
- except TypeError:
- syslog.syslog(p, repr(s))
- f = open(path.log_file, "a")
- f.write("%s [%s]: %s\n" % (time.asctime(), builder, s))
- f.close()
-
+ if do_syslog:
+ try:
+ syslog.syslog(p, str(s))
+ except TypeError:
+ syslog.syslog(p, repr(s))
+ f = open(path.log_file, "a")
+ f.write("%s [%s]: %s\n" % (time.asctime(), builder, s))
+ f.close()
+
def panic(s):
- log(syslog.LOG_ALERT, "PANIC: %s" % s)
- raise "PANIC: %s" % str(s)
+ log(syslog.LOG_ALERT, "PANIC: %s" % s)
+ raise Exception("PANIC: %s" % str(s))
def alert(s):
- log(syslog.LOG_ALERT, "alert: %s" % s)
+ log(syslog.LOG_ALERT, "alert: %s" % s)
def error(s):
- log(syslog.LOG_ERR, "error: %s" % s)
+ log(syslog.LOG_ERR, "error: %s" % s)
def warn(s):
- log(syslog.LOG_WARNING, "warning: %s" % s)
+ log(syslog.LOG_WARNING, "warning: %s" % s)
def notice(s):
- log(syslog.LOG_NOTICE, "notice: %s" % s)
+ log(syslog.LOG_NOTICE, "notice: %s" % s)
def open_syslog(name, f):
- global do_syslog
- do_syslog = 1
- syslog.openlog(name, syslog.LOG_PID, f)
+ global do_syslog
+ do_syslog = 1
+ syslog.openlog(name, syslog.LOG_PID, f)
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import os
import sys
import time
import wrap
def run_loop(fnc, secs = 5, max = 60):
- def run():
- pid = os.fork()
- if pid == 0:
- wrap.wrap(fnc)
- sys.exit(0)
- else:
- pid, s = os.waitpid(pid, 0)
- if os.WIFEXITED(s):
- s = os.WEXITSTATUS(s)
- if s != 0:
- sys.exit(s)
- else:
- sys.exit(10)
-
- start = time.time()
- while time.time() - start < max:
- last = time.time()
- run()
- took = time.time() - last
- if took < secs:
- time.sleep(secs - took)
+ def run():
+ pid = os.fork()
+ if pid == 0:
+ wrap.wrap(fnc)
+ sys.exit(0)
+ else:
+ pid, s = os.waitpid(pid, 0)
+ if os.WIFEXITED(s):
+ s = os.WEXITSTATUS(s)
+ if s != 0:
+ sys.exit(s)
+ else:
+ sys.exit(10)
+
+ start = time.time()
+ while time.time() - start < max:
+ last = time.time()
+ run()
+ took = time.time() - last
+ if took < secs:
+ time.sleep(secs - took)
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import time
import os
import sys
import util
class Message:
- def __init__(self):
- self.headers = {}
- self.body = StringIO.StringIO()
- self.set_std_headers()
+ def __init__(self):
+ self.headers = {}
+ self.body = StringIO.StringIO()
+ self.set_std_headers()
- def set_header(self, n, v):
- self.headers[n] = v
+ def set_header(self, n, v):
+ self.headers[n] = v
- def set_headers(self, to = None, cc = None, subject = None):
- if to != None:
- self.set_header("To", to)
- if cc != None:
- self.set_header("Cc", cc)
- if subject != None:
- self.set_header("Subject", subject)
+ def set_headers(self, to = None, cc = None, subject = None):
+ if to != None:
+ self.set_header("To", to)
+ if cc != None:
+ self.set_header("Cc", cc)
+ if subject != None:
+ self.set_header("Subject", subject)
- def write_line(self, l):
- self.body.write("%s\n" % l)
+ def write_line(self, l):
+ self.body.write("%s\n" % l)
- def write(self, s):
- self.body.write(s)
+ def write(self, s):
+ self.body.write(s)
- def append_log(self, log):
- s = os.stat(log)
- if s.st_size > 50000:
- # just head and tail
- f = open(log)
- line_cnt = 0
- for l in f.xreadlines():
- line_cnt += 1
- f.seek(0)
- line = 0
- for l in f.xreadlines():
- if line < 100 or line > line_cnt - 100:
- self.body.write(l)
- if line == line_cnt - 100:
- self.body.write("\n\n[...]\n\n")
- line += 1
- else:
- util.sendfile(open(log), self.body)
+ def append_log(self, log):
+ s = os.stat(log)
+ if s.st_size > 50000:
+ # just head and tail
+ f = open(log)
+ line_cnt = 0
+ for l in f.xreadlines():
+ line_cnt += 1
+ f.seek(0)
+ line = 0
+ for l in f.xreadlines():
+ if line < 100 or line > line_cnt - 100:
+ self.body.write(l)
+ if line == line_cnt - 100:
+ self.body.write("\n\n[...]\n\n")
+ line += 1
+ else:
+ util.sendfile(open(log), self.body)
- def set_std_headers(self):
- self.headers["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
- self.headers["Message-ID"] = "<pld-builder.%f.%d@%s>" \
- % (time.time(), os.getpid(), os.uname()[1])
- self.headers["From"] = "PLD %s builder <%s>" \
- % (config.builder, config.email)
- self.headers["X-PLD-Builder"] = config.builder
+ def set_std_headers(self):
+ self.headers["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
+ self.headers["Message-ID"] = "<pld-builder.%f.%d@%s>" \
+ % (time.time(), os.getpid(), os.uname()[1])
+ self.headers["From"] = "PLD %s builder <%s>" \
+ % (config.builder, config.email)
+ self.headers["X-PLD-Builder"] = config.builder
- def write_to(self, f):
- for k, v in self.headers.items():
- f.write("%s: %s\n" % (k, v))
- f.write("\n")
- self.body.seek(0)
- util.sendfile(self.body, f)
+ def write_to(self, f):
+ for k, v in self.headers.items():
+ f.write("%s: %s\n" % (k, v))
+ f.write("\n")
+ self.body.seek(0)
+ util.sendfile(self.body, f)
- def send(self):
- send_sendmail = "/usr/sbin/sendmail -t -f %s" % config.admin_email
- f = os.popen(send_sendmail, "w")
- self.write_to(f)
- f.close()
+ def send(self):
+ send_sendmail = "/usr/sbin/sendmail -t -f %s" % config.admin_email
+ f = os.popen(send_sendmail, "w")
+ self.write_to(f)
+ f.close()
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import StringIO
import mailer
from config import config
class Notifier:
- def __init__(self, g):
- self.xml = StringIO.StringIO()
- self.xml.write("<notification group-id='%s' builder='%s'>\n" % \
+ def __init__(self, g):
+ self.xml = StringIO.StringIO()
+ self.xml.write("<notification group-id='%s' builder='%s'>\n" % \
(g.id, config.builder))
-
- def send(self):
- self.xml.write("</notification>\n")
- msg = mailer.Message()
- msg.set_headers(to = config.notify_email, subject = "status notification")
- msg.set_header("X-New-PLD-Builder", "status-notification")
- self.xml.seek(0)
- util.sendfile(gpg.sign(self.xml), msg)
- msg.send()
- self.xml = None
-
- def add_batch(self, b, s):
- self.xml.write(" <batch id='%s' status='%s' />\n" % (b.b_id, s))
-
+
+ def send(self):
+ self.xml.write("</notification>\n")
+ msg = mailer.Message()
+ msg.set_headers(to = config.notify_email, subject = "status notification")
+ msg.set_header("X-New-PLD-Builder", "status-notification")
+ self.xml.seek(0)
+ util.sendfile(gpg.sign(self.xml), msg)
+ msg.send()
+ self.xml = None
+
+ def add_batch(self, b, s):
+ self.xml.write(" <batch id='%s' status='%s' />\n" % (b.b_id, s))
+
n = None
def begin(group):
- global n
- n = Notifier(group)
+ global n
+ n = Notifier(group)
def add_batch(batch, status):
- n.add_batch(batch, status)
+ n.add_batch(batch, status)
def send():
- n.send()
+ n.send()
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import os.path
root_dir = os.path.expanduser('~/pld-builder.new/')
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import select
import os
import StringIO
def rw_pipe(buf_, infd, outfd):
- buf = StringIO.StringIO()
- buf.write(buf_.read())
- ret = StringIO.StringIO()
- pos = 0
- rd_fin = 0
- wr_fin = 0
- buf.seek(pos)
- while not (rd_fin and wr_fin):
- if wr_fin: o = []
- else: o = [infd]
- if rd_fin: i = []
- else: i = [outfd]
- i, o, e = select.select(i, o, [])
- if i != []:
- s = os.read(outfd.fileno(), 1000)
- if s == "": rd_fin = 1
- ret.write(s)
- if o != []:
- buf.seek(pos)
- s = buf.read(1000)
- if s == "":
- infd.close()
- wr_fin = 1
- else:
- cnt = os.write(infd.fileno(), s)
- pos += cnt
- outfd.close()
- ret.seek(0)
- return ret
+ buf = StringIO.StringIO()
+ buf.write(buf_.read())
+ ret = StringIO.StringIO()
+ pos = 0
+ rd_fin = 0
+ wr_fin = 0
+ buf.seek(pos)
+ while not (rd_fin and wr_fin):
+ if wr_fin: o = []
+ else: o = [infd]
+ if rd_fin: i = []
+ else: i = [outfd]
+ i, o, e = select.select(i, o, [])
+ if i != []:
+ s = os.read(outfd.fileno(), 1000)
+ if s == "": rd_fin = 1
+ ret.write(s)
+ if o != []:
+ buf.seek(pos)
+ s = buf.read(1000)
+ if s == "":
+ infd.close()
+ wr_fin = 1
+ else:
+ cnt = os.write(infd.fileno(), s)
+ pos += cnt
+ outfd.close()
+ ret.seek(0)
+ return ret
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import re
import types
import string
def get_poldek_requires():
- # precompile regexps
- name_rx = re.compile(r"\d+\. ([^\s]+)-[^-]+-[^-]+\n")
- req_rx = re.compile(r" req .* --> (.*)\n")
- pkg_name_rx = re.compile(r"([^\s]+)-[^-]+-[^-]+")
+ # precompile regexps
+ name_rx = re.compile(r"\d+\. ([^\s]+)-[^-]+-[^-]+\n")
+ req_rx = re.compile(r" req .* --> (.*)\n")
+ pkg_name_rx = re.compile(r"([^\s]+)-[^-]+-[^-]+")
- # todo: if a and b are sets, then use sets module
- # and intersection method on set object
- def intersect(a, b):
- r = []
- for x in a:
- if x in b: r.append(x)
- return r
-
- # add given req-list to cur_pkg_reqs
- def add_req(reqs):
- if len(reqs) == 1:
- if reqs[0] not in cur_pkg_reqs:
- cur_pkg_reqs.append(reqs[0])
- else:
- did = 0
- for x in cur_pkg_reqs:
- if type(x) is types.ListType:
- i = intersect(x, reqs)
- if len(i) == 0:
- continue
- did = 1
- idx = cur_pkg_reqs.index(x)
- if len(i) == 1:
- if i[0] in cur_pkg_reqs:
- del cur_pkg_reqs[idx]
- else:
- cur_pkg_reqs[idx] = i[0]
- else:
- cur_pkg_reqs[idx] = i
+ # todo: if a and b are sets, then use sets module
+ # and intersection method on set object
+ def intersect(a, b):
+ r = []
+ for x in a:
+ if x in b: r.append(x)
+ return r
+
+ # add given req-list to cur_pkg_reqs
+ def add_req(reqs):
+ if len(reqs) == 1:
+ if reqs[0] not in cur_pkg_reqs:
+ cur_pkg_reqs.append(reqs[0])
else:
- if x in reqs:
- return
- if not did:
- cur_pkg_reqs.append(reqs)
+ did = 0
+ for x in cur_pkg_reqs:
+ if type(x) is types.ListType:
+ i = intersect(x, reqs)
+ if len(i) == 0:
+ continue
+ did = 1
+ idx = cur_pkg_reqs.index(x)
+ if len(i) == 1:
+ if i[0] in cur_pkg_reqs:
+ del cur_pkg_reqs[idx]
+ else:
+ cur_pkg_reqs[idx] = i[0]
+ else:
+ cur_pkg_reqs[idx] = i
+ else:
+ if x in reqs:
+ return
+ if not did:
+ cur_pkg_reqs.append(reqs)
+
+ pkg_reqs = {}
+ cur_pkg_reqs = None
+ cur_pkg = None
- pkg_reqs = {}
- cur_pkg_reqs = None
- cur_pkg = None
-
- f = chr_popen("poldek -v -v --verify --unique-pkg-names")
- for l in xreadlines.xreadlines(f):
- m = name_rx.match(l)
- if m:
- if cur_pkg:
- pkg_reqs[cur_pkg] = cur_pkg_reqs
- cur_pkg = m.groups(1)
- if pkg_reqs.has_key(cur_pkg):
- cur_pkg = None
- cur_pkg_reqs = None
- else:
- cur_pkg_reqs = []
- continue
- m = req_rx.match(l)
- if m:
- reqs = []
- for x in string.split(m.group(1)):
- if x in ["RPMLIB_CAP", "NOT", "FOUND", "UNMATCHED"]: continue
- m = pkg_name_rx.match(x)
+ f = chr_popen("poldek -v -v --verify --unique-pkg-names")
+ for l in xreadlines.xreadlines(f):
+ m = name_rx.match(l)
if m:
- reqs.append(m.group(1))
- else:
- msg("poldek_reqs: bad pkg name: %s\n" % x)
- if len(reqs) != 0: add_req(reqs)
-
- f.close()
+ if cur_pkg:
+ pkg_reqs[cur_pkg] = cur_pkg_reqs
+ cur_pkg = m.groups(1)
+ if pkg_reqs.has_key(cur_pkg):
+ cur_pkg = None
+ cur_pkg_reqs = None
+ else:
+ cur_pkg_reqs = []
+ continue
+ m = req_rx.match(l)
+ if m:
+ reqs = []
+ for x in string.split(m.group(1)):
+ if x in ["RPMLIB_CAP", "NOT", "FOUND", "UNMATCHED"]: continue
+ m = pkg_name_rx.match(x)
+ if m:
+ reqs.append(m.group(1))
+ else:
+ msg("poldek_reqs: bad pkg name: %s\n" % x)
+ if len(reqs) != 0: add_req(reqs)
+
+ f.close()
- if cur_pkg:
- pkg_reqs[cur_pkg] = cur_pkg_reqs
+ if cur_pkg:
+ pkg_reqs[cur_pkg] = cur_pkg_reqs
- return pkg_reqs
+ return pkg_reqs
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import string
import path
from config import config
def unpackaged_files(b):
- msg = "warning: Installed (but unpackaged) file(s) found:\n"
- f = open(b.logfile)
- copy_mode = 0
- out = []
- for l in f.xreadlines():
- if l == msg:
- copy_mode = 1
- out.append(l)
- elif copy_mode:
- if l[0] != ' ':
- copy_mode = 0
- else:
- out.append(l)
- return out
+ msg = "warning: Installed (but unpackaged) file(s) found:\n"
+ f = open(b.logfile)
+ copy_mode = 0
+ out = []
+ for l in f.xreadlines():
+ if l == msg:
+ copy_mode = 1
+ out.append(l)
+ elif copy_mode:
+ if l[0] != ' ':
+ copy_mode = 0
+ else:
+ out.append(l)
+ return out
def add_pld_builder_info(b):
- l = open(b.logfile, "a")
- l.write("Begin-PLD-Builder-Info\n")
- l.write("Build-Time: %s\n\n" % b.build_time)
- st = ftp.status()
- if st != "":
- l.write("Files queued for ftp:\n%s\n" % st)
- ftp.clear_status()
- l.writelines(unpackaged_files(b))
- l.write("End-PLD-Builder-Info\n")
+ l = open(b.logfile, "a")
+ l.write("Begin-PLD-Builder-Info\n")
+ l.write("Build-Time: %s\n\n" % b.build_time)
+ st = ftp.status()
+ if st != "":
+ l.write("Files queued for ftp:\n%s\n" % st)
+ ftp.clear_status()
+ l.writelines(unpackaged_files(b))
+ l.write("End-PLD-Builder-Info\n")
def info_from_log(b, target):
- beg = "Begin-PLD-Builder-Info\n"
- end = "End-PLD-Builder-Info\n"
- f = open(b.logfile)
- copy_mode = 0
- need_header = 1
- for l in f.xreadlines():
- if l == beg:
- if need_header:
- need_header = 0
- target.write("\n--- %s:%s:\n" % (b.spec, b.branch))
- copy_mode = 1
- elif copy_mode:
- if l == end:
- copy_mode = 0
- else:
- target.write(l)
-
+ beg = "Begin-PLD-Builder-Info\n"
+ end = "End-PLD-Builder-Info\n"
+ f = open(b.logfile)
+ copy_mode = 0
+ need_header = 1
+ for l in f.xreadlines():
+ if l == beg:
+ if need_header:
+ need_header = 0
+ target.write("\n--- %s:%s:\n" % (b.spec, b.branch))
+ copy_mode = 1
+ elif copy_mode:
+ if l == end:
+ copy_mode = 0
+ else:
+ target.write(l)
+
def send_report(r, is_src = False):
- s_failed = ' '.join([b.spec for b in r.batches if b.build_failed])
- s_ok = ' '.join([b.spec for b in r.batches if not b.build_failed])
-
- if s_failed: s_failed = "ERRORS: %s" % s_failed
- if s_ok: s_ok = "OK: %s" % s_ok
-
- subject = ' '.join((s_failed, s_ok))
-
- m = mailer.Message()
- m.set_headers(to = r.requester_email,
- cc = config.builder_list,
- subject = subject[0:100])
- if is_src:
- m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
- else:
- m.set_header("References", "<%s@pld.src.builder>" % r.id)
- m.set_header("In-Reply-To", "<%s@pld.src.builder>" % r.id)
-
- for b in r.batches:
- if b.build_failed and b.logfile == None:
- info = b.skip_reason
- elif b.build_failed:
- info = "FAILED"
- else:
- info = "OK"
- m.write("%s (%s): %s\n" % (b.spec, b.branch, info))
-
- for b in r.batches:
- if b.logfile != None:
- info_from_log(b, m)
-
- for b in r.batches:
- if (b.is_command () or b.build_failed) and b.logfile != None:
- m.write("\n\n*** buildlog for %s\n" % b.spec)
- m.append_log(b.logfile)
- m.write("\n\n")
-
- m.send()
+ s_failed = ' '.join([b.spec for b in r.batches if b.build_failed])
+ s_ok = ' '.join([b.spec for b in r.batches if not b.build_failed])
-def send_cia_report(r, is_src = False):
+ if s_failed: s_failed = "ERRORS: %s" % s_failed
+ if s_ok: s_ok = "OK: %s" % s_ok
- subject = 'DeliverXML'
-
- m = mailer.Message()
- if (len(config.bot_email) == 0):
- return
-
- m.set_headers(to = config.bot_email,
- subject = subject)
- m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
- m.set_header("X-mailer", "$Id$")
- m.set_header("X-builder", "PLD")
-
- # get header of xml message from file
- f = open(path.root_dir + 'PLD_Builder/cia-head.xml')
- m.write(f.read())
- f.close()
-
- # write in iteration list and status of all processed files
- for b in r.batches:
- # Instead of hardcoded Ac information use some config variable
- m.write('<package name="%s" arch="%s">\n' % (b.spec, b.branch))
- if b.build_failed:
- m.write('<failed/>\n')
+ subject = ' '.join((s_failed, s_ok))
+
+ m = mailer.Message()
+ m.set_headers(to = r.requester_email,
+ cc = config.builder_list,
+ subject = subject[0:100])
+ if is_src:
+ m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
else:
- m.write('<success/>\n')
- m.write('</package>\n')
-
- # get footer of xml message from file
- f = open(path.root_dir + 'PLD_Builder/cia-foot.xml')
- m.write(f.read())
- f.close()
-
- # send the e-mail
- m.send()
+ m.set_header("References", "<%s@pld.src.builder>" % r.id)
+ m.set_header("In-Reply-To", "<%s@pld.src.builder>" % r.id)
+
+ for b in r.batches:
+ if b.build_failed and b.logfile == None:
+ info = b.skip_reason
+ elif b.build_failed:
+ info = "FAILED"
+ else:
+ info = "OK"
+ m.write("%s (%s): %s\n" % (b.spec, b.branch, info))
+
+ for b in r.batches:
+ if b.logfile != None:
+ info_from_log(b, m)
+
+ for b in r.batches:
+ if (b.is_command () or b.build_failed) and b.logfile != None:
+ m.write("\n\n*** buildlog for %s\n" % b.spec)
+ m.append_log(b.logfile)
+ m.write("\n\n")
+
+ m.send()
+
+def send_cia_report(r, is_src = False):
+
+ subject = 'DeliverXML'
+
+ m = mailer.Message()
+ if (len(config.bot_email) == 0):
+ return
+
+ m.set_headers(to = config.bot_email,
+ subject = subject)
+ m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
+ m.set_header("X-mailer", "$Id$")
+ m.set_header("X-builder", "PLD")
+
+ # get header of xml message from file
+ f = open(path.root_dir + 'PLD_Builder/cia-head.xml')
+ m.write(f.read())
+ f.close()
+
+ # write in iteration list and status of all processed files
+ for b in r.batches:
+ # Instead of hardcoded Ac information use some config variable
+ m.write('<package name="%s" arch="%s">\n' % (b.spec, b.branch))
+ if b.build_failed:
+ m.write('<failed/>\n')
+ else:
+ m.write('<success/>\n')
+ m.write('</package>\n')
+
+ # get footer of xml message from file
+ f = open(path.root_dir + 'PLD_Builder/cia-foot.xml')
+ m.write(f.read())
+ f.close()
+
+ # send the e-mail
+ m.send()
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
from xml.dom.minidom import *
import string
import time
from acl import acl
__all__ = ['parse_request', 'parse_requests']
-
+
def text(e):
    """Return the concatenated text content of element *e*.

    Panics if *e* has any non-text child node.
    """
    pieces = []
    for child in e.childNodes:
        if child.nodeType != Element.TEXT_NODE:
            log.panic("xml: text expected in <%s>, got %d" % (e.nodeName, child.nodeType))
        pieces.append(child.nodeValue)
    return "".join(pieces)
def attr(e, a, default = None):
    """Return the value of attribute *a* on element *e*.

    When the attribute is missing, return *default* if one was given;
    otherwise let the underlying lookup error propagate.
    """
    try:
        return e.attributes[a].value
    except:
        if default is not None:
            return default
        raise
def escape(s):
    """XML-escape the characters &, < and > in string *s*."""
    return xml.sax.saxutils.escape(s)
def is_blank(e):
    """True when *e* is a text node containing only whitespace.

    Used to skip insignificant whitespace between XML elements.
    """
    # idiom: str.strip() instead of the deprecated string.strip()
    return e.nodeType == Element.TEXT_NODE and e.nodeValue.strip() == ""
class Group:
    """A group of build batches submitted together as one request.

    Parsed from a <group> XML element; also validates that batches are
    listed in dependency order.
    """

    def __init__(self, e):
        self.batches = []
        self.kind = 'group'
        self.id = attr(e, "id")
        self.no = int(attr(e, "no"))
        self.priority = 2
        self.time = time.time()
        self.requester = ""
        self.requester_email = ""
        # idiom: str.split() instead of deprecated string.split()
        self.flags = attr(e, "flags", "").split()
        for c in e.childNodes:
            if is_blank(c): continue
            if c.nodeType != Element.ELEMENT_NODE:
                log.panic("xml: evil group child %d" % c.nodeType)
            if c.nodeName == "batch":
                self.batches.append(Batch(c))
            elif c.nodeName == "requester":
                self.requester = text(c)
                self.requester_email = attr(c, "email", "")
            elif c.nodeName == "priority":
                self.priority = int(text(c))
            elif c.nodeName == "time":
                self.time = int(text(c))
            else:
                log.panic("xml: evil group child (%s)" % c.nodeName)
        # note that we also check that group is sorted WRT deps
        m = {}
        for b in self.batches:
            deps = []
            m[b.b_id] = b
            for dep in b.depends_on:
                if dep in m:
                    # avoid self-deps
                    if id(m[dep]) != id(b):
                        deps.append(m[dep])
                else:
                    log.panic("xml: dependency not found in group")
            b.depends_on = deps
        if self.requester_email == "" and self.requester != "":
            self.requester_email = acl.user(self.requester).mail_to()

    def dump(self, f):
        """Write a plain-text summary of the group (and batches) to *f*."""
        f.write("group: %d (id=%s pri=%d)\n" % (self.no, self.id, self.priority))
        f.write(" from: %s\n" % self.requester)
        f.write(" flags: %s\n" % " ".join(self.flags))
        f.write(" time: %s\n" % time.asctime(time.localtime(self.time)))
        for b in self.batches:
            b.dump(f)
        f.write("\n")

    def dump_html(self, f):
        """Write an HTML summary of the group (and batches) to *f*."""
        f.write("<p><b>%d</b>. %s from %s <small>%s, %d, %s</small><br/>\n" % \
            (self.no,
            escape(time.strftime("%Y.%m.%d %H:%M:%S", time.localtime(self.time))),
            escape(self.requester),
            self.id, self.priority, " ".join(self.flags)))
        f.write("<ul>\n")
        for b in self.batches:
            b.dump_html(f)
        f.write("</ul>\n")
        f.write("</p>\n")

    def write_to(self, f):
        """Serialize this group back to its XML form into *f*.

        NOTE(review): leading whitespace inside the emitted XML is
        cosmetic only (lost in the source's formatting) — parsers ignore it.
        """
        f.write("""
<group id="%s" no="%d" flags="%s">
<requester email='%s'>%s</requester>
<time>%d</time>
<priority>%d</priority>\n""" % (self.id, self.no, " ".join(self.flags),
                escape(self.requester_email), escape(self.requester),
                self.time, self.priority))
        for b in self.batches:
            b.write_to(f)
        f.write(" </group>\n\n")

    def is_done(self):
        """Return 1 when every batch in the group is finished, else 0."""
        ok = 1
        for b in self.batches:
            if not b.is_done():
                ok = 0
        return ok
class Batch:
    """One unit of work within a Group: a spec build or a shell command.

    Parsed from a <batch> XML element; tracks per-builder status.
    """

    def __init__(self, e):
        self.bconds_with = []
        self.bconds_without = []
        self.builders = []
        self.builders_status = {}
        self.branch = ""
        self.src_rpm = ""
        self.info = ""
        self.spec = ""
        self.command = ""
        self.command_flags = []
        self.gb_id = ""
        self.b_id = attr(e, "id")
        # idiom: str.split() instead of deprecated string.split()
        self.depends_on = attr(e, "depends-on").split()
        for c in e.childNodes:
            if is_blank(c): continue
            if c.nodeType != Element.ELEMENT_NODE:
                log.panic("xml: evil batch child %d" % c.nodeType)
            if c.nodeName == "src-rpm":
                self.src_rpm = text(c)
            elif c.nodeName == "spec":
                self.spec = text(c)
            elif c.nodeName == "command":
                # a shell command batch masquerades as spec "COMMAND"
                self.spec = "COMMAND"
                self.command = text(c)
                self.command_flags = attr(c, "flags", "").split()
            elif c.nodeName == "info":
                self.info = text(c)
            elif c.nodeName == "branch":
                self.branch = text(c)
            elif c.nodeName == "builder":
                self.builders.append(text(c))
                self.builders_status[text(c)] = attr(c, "status", "?")
            elif c.nodeName == "with":
                self.bconds_with.append(text(c))
            elif c.nodeName == "without":
                self.bconds_without.append(text(c))
            else:
                log.panic("xml: evil batch child (%s)" % c.nodeName)

    def is_done(self):
        """Return 1 when every builder reports a final status (OK/FAIL/SKIP)."""
        ok = 1
        for b in self.builders:
            s = self.builders_status[b]
            if not (s == "OK" or s == "FAIL" or s == "SKIP"):
                ok = 0
        return ok

    def dump(self, f):
        """Write a plain-text summary of this batch to *f*."""
        f.write(" batch: %s/%s\n" % (self.src_rpm, self.spec))
        f.write(" info: %s\n" % self.info)
        f.write(" branch: %s\n" % self.branch)
        f.write(" bconds: %s\n" % self.bconds_string())
        builders = []
        for b in self.builders:
            builders.append("%s:%s" % (b, self.builders_status[b]))
        f.write(" builders: %s\n" % " ".join(builders))

    def is_command(self):
        """True when this batch runs a shell command rather than a build."""
        return self.command != ""

    def dump_html(self, f):
        """Write a one-line HTML summary with per-builder buildlog links."""
        f.write("<li>\n")
        if self.is_command():
            desc = "SH: %s [%s]" % (self.command, ' '.join(self.command_flags))
        else:
            desc = "%s (%s -R %s %s)" % \
                (self.src_rpm, self.spec, self.branch, self.bconds_string())
        f.write("%s <small>[" % desc)
        builders = []
        # index of each builder in buildlogs.pld-linux.org's arch table
        bl_archs = {
            "th-SRPMS":0, "th-i486":1, "th-i686":2, "th-athlon":3, "th-ia64":4,
            "th-alpha":5, "th-sparc":6, "th-ppc":7,
            "ac-SRPMS":8, "ac-i386":9, "ac-i586":10, "ac-i686":11, "ac-athlon":12,
            "ac-amd64":13, "ac-alpha":14, "ac-sparc":15, "ac-ppc":16,
            "ra-i386":17, "ra-i586":18, "ra-i686":19, "ra-alpha":20,
            "ra-sparc":21, "ra-ppc":22, "nest-i486":23, "nest-i686":24,
            "nest-alpha":25, "nest-ppc":26
        }
        for b in self.builders:
            s = self.builders_status[b]
            if s == "OK":
                c = "green"
            elif s == "FAIL":
                c = "red"
            elif s == "SKIP":
                c = "blue"
            else:
                c = "black"
            link_pre = ""
            link_post = ""
            if b in bl_archs and (s == "OK" or s == "FAIL") and len(self.spec) > 5:
                if self.is_command():
                    bl_name = "command"
                else:
                    # strip the trailing ".spec"
                    bl_name = self.spec[:-5]
                path = "/%s/%s/%s.bz2" % (b.replace('-','/'), s, bl_name)
                is_ok = 0
                if s == "OK": is_ok = 1
                # buildlogs id is the hex md5 digest of the log path
                # (md5 is the Python 2 module)
                link_pre = "<a href=\"http://buildlogs.pld-linux.org/index.php?idx=%d&ok=%d&id=%s\">" \
                    % (bl_archs[b], is_ok, binascii.b2a_hex(md5.new(path).digest()))
                link_post = "</a>"
            builders.append(link_pre + ("<font color='%s'><b>%s:%s</b></font>" %
                (c, b, s)) + link_post)
        f.write("%s]</small></li>\n" % " ".join(builders))

    def bconds_string(self):
        """Render the --with/--without flags for the rpmbuild command line."""
        r = ""
        for b in self.bconds_with:
            r = r + " --with " + b
        for b in self.bconds_without:
            r = r + " --without " + b
        return r

    def write_to(self, f):
        """Serialize this batch back to its XML form into *f*.

        NOTE(review): leading whitespace inside the emitted XML is
        cosmetic only (lost in the source's formatting) — parsers ignore it.
        """
        f.write("""
<batch id='%s' depends-on='%s'>
<src-rpm>%s</src-rpm>
<command flags="%s">%s</command>
<spec>%s</spec>
<branch>%s</branch>
<info>%s</info>\n""" % (self.b_id,
            # fix: `lambda (b): ...` tuple-parameter syntax is invalid in
            # Python 3; a list comprehension works everywhere
            " ".join([d.b_id for d in self.depends_on]),
            escape(self.src_rpm),
            escape(' '.join(self.command_flags)), escape(self.command),
            escape(self.spec), escape(self.branch), escape(self.info)))
        for b in self.bconds_with:
            f.write(" <with>%s</with>\n" % escape(b))
        for b in self.bconds_without:
            f.write(" <without>%s</without>\n" % escape(b))
        for b in self.builders:
            f.write(" <builder status='%s'>%s</builder>\n" % \
                (escape(self.builders_status[b]), escape(b)))
        f.write(" </batch>\n")

    def log_line(self, l):
        """Log *l* globally and append it to this batch's logfile, if any."""
        log.notice(l)
        if self.logfile is not None:
            util.append_to(self.logfile, l)

    def expand_builders(batch, all_builders):
        """Expand fnmatch patterns in batch.builders against *all_builders*.

        Patterns that match nothing are kept verbatim.
        """
        expanded = []
        for pattern in batch.builders:
            matched = []
            for my_bld in all_builders:
                if fnmatch.fnmatch(my_bld, pattern):
                    matched.append(my_bld)
            if matched != []:
                expanded.extend(matched)
            else:
                expanded.append(pattern)
        batch.builders = expanded
class Notification:
    """A status report from a binary builder.

    Maps batch ids to their final status ("OK"/"FAIL"/"SKIP") and can
    apply those statuses to a queue of pending groups.
    """

    def __init__(self, e):
        self.kind = 'notification'
        self.group_id = attr(e, "group-id")
        self.builder = attr(e, "builder")
        # dead `self.batches = []` removed: it was immediately replaced
        self.batches = {}
        for c in e.childNodes:
            if is_blank(c): continue
            if c.nodeType != Element.ELEMENT_NODE:
                log.panic("xml: evil notification child %d" % c.nodeType)
            if c.nodeName == "batch":
                id = attr(c, "id")
                status = attr(c, "status")
                if status != "OK" and status != "FAIL" and status != "SKIP":
                    # bug fix: formerly referenced undefined self.status,
                    # which would raise AttributeError instead of panicking
                    log.panic("xml notification: bad status: %s" % status)
                self.batches[id] = status
            else:
                log.panic("xml: evil notification child (%s)" % c.nodeName)

    def apply_to(self, q):
        """Record our builder's statuses on matching batches of queued groups."""
        for r in q.requests:
            if r.kind == "group":
                for b in r.batches:
                    if b.b_id in self.batches:
                        b.builders_status[self.builder] = self.batches[b.b_id]
def build_request(e):
    """Construct the request object matching top-level XML element *e*.

    Dispatches on the element name; panics on anything unexpected.
    """
    if e.nodeType != Element.ELEMENT_NODE:
        log.panic("xml: evil request element")
    if e.nodeName == "group":
        return Group(e)
    elif e.nodeName == "notification":
        return Notification(e)
    elif e.nodeName == "command":
        # FIXME
        return Command(e)
    else:
        log.panic("xml: evil request <%s>" % e.nodeName)
def parse_request(f):
    """Parse a single request document from file object *f*."""
    doc = parse(f)
    return build_request(doc.documentElement)
def parse_requests(f):
    """Parse a multi-request document from *f*; return a list of requests."""
    doc = parse(f)
    reqs = []
    for node in doc.documentElement.childNodes:
        if is_blank(node):
            continue
        reqs.append(build_request(node))
    return reqs
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import string
import signal
import os
# Highest request number seen locally; updated by has_new(), read by main().
last_count = 0

def alarmalarm(signum, frame):
    """SIGALRM handler: abort a hung HTTP transfer by raising IOError."""
    # fix: `raise IOError, '...'` is Python-2-only syntax; the call form
    # behaves identically and is valid in both 2 and 3
    raise IOError('TCP connection hung')
def has_new(control_url):
    """Return 1 if the remote queue counter differs from the local one.

    Side effect: stores the local counter in the module-global
    ``last_count``.  Exits the process when the remote counter cannot be
    fetched within 240 seconds.
    """
    global last_count
    cnt_f = open(path.last_req_no_file)
    # idiom: str.strip() instead of deprecated string.strip()
    last_count = int(cnt_f.readline().strip())
    cnt_f.close()

    f = None
    signal.signal(signal.SIGALRM, alarmalarm)
    try:
        signal.alarm(240)
        f = urllib.urlopen(control_url + "/max_req_no")
        signal.alarm(0)
    except:
        # deliberate best-effort: log and exit; the loop driver retries later
        signal.alarm(0)
        log.error("can't fetch %s" % (control_url + "/max_req_no"))
        sys.exit(1)
    res = 0
    if int(f.readline().strip()) != last_count:
        res = 1
    f.close()
    return res
def fetch_queue(control_url):
    """Fetch queue.gz from *control_url*, verify its GPG signature and the
    signer's ACL rights, and return the parsed request list.

    Exits the process on fetch failure, bad signature, or missing rights.
    """
    signal.signal(signal.SIGALRM, alarmalarm)
    try:
        signal.alarm(240)
        f = urllib.urlopen(control_url + "/queue.gz")
        signal.alarm(0)
    except:
        signal.alarm(0)
        log.error("can't fetch %s" % (control_url + "/queue.gz"))
        sys.exit(1)
    # buffer the download, then decompress from memory
    sio = StringIO.StringIO()
    util.sendfile(f, sio)
    f.close()
    sio.seek(0)
    f = gzip.GzipFile(fileobj = sio)
    (signers, body) = gpg.verify_sig(f)
    u = acl.user_by_email(signers)
    # idiom: identity comparison with None
    if u is None:
        log.alert("queue.gz not signed with signature of valid user: %s" % signers)
        sys.exit(1)
    if not u.can_do("sign_queue", "all"):
        log.alert("user %s is not allowed to sign my queue" % u.login)
        sys.exit(1)
    body.seek(0)
    return request.parse_requests(body)
def handle_reqs(builder, reqs):
    """Append to *builder*'s private queue those groups that reference it.

    Creates the queue file on first use; the queue is locked for the
    duration of the update.
    """
    qpath = path.queue_file + "-" + builder
    if not os.access(qpath, os.F_OK):
        util.append_to(qpath, "<queue/>\n")
    q = B_Queue(qpath)
    q.lock(0)
    q.read()
    for r in reqs:
        if r.kind != 'group':
            # fix: string exceptions were removed in Python 2.6;
            # raise a real exception object instead
            raise Exception('handle_reqs: fatal: huh? %s' % r.kind)
        need_it = 0
        for b in r.batches:
            if builder in b.builders:
                need_it = 1
        if need_it:
            log.notice("queued %s (%d) for %s" % (r.id, r.no, builder))
            q.add(r)
    q.write()
    q.unlock()
def main():
    """One pass of the request fetcher.

    Under a non-blocking lock: if the control host advertises new
    requests, fetch the queue, distribute fresh groups to each binary
    builder's queue, and persist the highest request number seen.
    """
    lck = lock.lock("request_fetcher", non_block = True)
    # idiom: identity comparison with None
    if lck is None:
        sys.exit(1)
    init_conf("")

    status.push("fetching requests")
    if has_new(config.control_url):
        q = fetch_queue(config.control_url)
        max_no = 0
        q_new = []
        for r in q:
            if r.no > max_no:
                max_no = r.no
            # only requests newer than our recorded counter are queued
            if r.no > last_count:
                q_new.append(r)
        for b in config.binary_builders:
            handle_reqs(b, q_new)
        f = open(path.last_req_no_file, "w")
        f.write("%d\n" % max_no)
        f.close()
    status.pop()
    lck.close()
if __name__ == '__main__':
    # http connection is established (and few bytes transferred through it)
    # each $secs seconds.
    loop.run_loop(main, secs = 10)
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import email
import string
import time
from config import config, init_conf
def check_double_id(id):
    """Return 1 (and log an alert) if request *id* was already processed;
    otherwise record it in the processed-ids file and return 0."""
    id_nl = id + "\n"

    ids = open(path.processed_ids_file)
    try:
        # fix: the original returned from inside the loop without closing
        # the file; try/finally guarantees the handle is released.
        # (plain file iteration replaces the removed xreadlines())
        for line in ids:
            if line == id_nl:
                # FIXME: security email here?
                log.alert("request %s already processed" % id)
                return 1
    finally:
        ids.close()

    ids = open(path.processed_ids_file, "a")
    try:
        ids.write(id_nl)
    finally:
        ids.close()

    return 0
def handle_group(r, user):
    """Validate a group request against *user*'s ACL rights and queue it.

    Any permission failure mails the requester and aborts the whole
    group.  On success the request is stamped with the requester's
    identity/priority and appended to the main queue.
    """
    def fail_mail(msg):
        # one-line failure report mailed back to the requester
        if len(r.batches) >= 1:
            spec = r.batches[0].spec
        else:
            spec = "None.spec"
        log.error("%s: %s" % (spec, msg))
        m = user.message_to()
        m.set_headers(subject = "building %s failed" % spec)
        m.write_line(msg)
        m.send()

    lock("request")
    if check_double_id(r.id):
        return

    for batch in r.batches:
        if not user.can_do("src", config.builder, batch.branch):
            fail_mail("user %s is not allowed to src:%s:%s" \
                % (user.get_login(), config.builder, batch.branch))
            return

        if "upgrade" in r.flags and not user.can_do("upgrade", config.builder, batch.branch):
            fail_mail("user %s is not allowed to upgrade:%s:%s" \
                % (user.get_login(), config.builder, batch.branch))
            return

        batch.expand_builders(config.binary_builders)
        # the src builder itself never appears as a binary builder
        if not batch.is_command() and config.builder in batch.builders:
            batch.builders.remove(config.builder)
        for bld in batch.builders:
            batch.builders_status[bld] = '?'
            if bld not in config.binary_builders and bld != config.builder:
                # idiom: " ".join() instead of deprecated string.join()
                fail_mail("I (src rpm builder '%s') do not handle binary builder '%s', only '%s'" % \
                    (config.builder, bld, " ".join(config.binary_builders)))
                return
            if batch.is_command():
                if not user.can_do("command", bld):
                    fail_mail("user %s is not allowed to command:%s" \
                        % (user.get_login(), bld))
                    return
            elif not user.can_do("binary", bld, batch.branch):
                pkg = batch.spec
                if pkg.endswith(".spec"):
                    pkg = pkg[:-5]
                # per-package override: binary-<pkg> privilege
                if not user.can_do("binary-" + pkg, bld, batch.branch):
                    fail_mail("user %s is not allowed to binary-%s:%s:%s" \
                        % (user.get_login(), pkg, bld, batch.branch))
                    return

    r.priority = user.check_priority(r.priority,config.builder)
    r.requester = user.get_login()
    r.requester_email = user.mail_to()
    r.time = time.time()
    log.notice("queued %s from %s" % (r.id, user.get_login()))
    q = B_Queue(path.queue_file)
    q.lock(0)
    q.read()
    q.add(r)
    q.write()
    q.unlock()
def handle_notification(r, user):
    """Apply builder notification *r* to the request queue and expire old
    entries (4 days for finished requests, 20 days otherwise)."""
    if not user.can_do("notify", r.builder):
        log.alert("user %s is not allowed to notify:%s" % (user.login, r.builder))
    q = B_Queue(path.req_queue_file)
    q.lock(0)
    q.read()
    # fix: `lambda (r):` tuple-parameter syntax is invalid in Python 3;
    # also rename to avoid shadowing the *r* parameter
    not_fin = filter(lambda req: not req.is_done(), q.requests)
    r.apply_to(q)
    for req in not_fin:
        if req.is_done():
            # request just completed: drop its cached srpms
            util.clean_tmp(path.srpms_dir + req.id)
    now = time.time()
    def leave_it(req):
        # for ,,done'' set timeout to 4d
        if req.is_done() and req.time + 4 * 24 * 60 * 60 < now:
            return False
        # and for not ,,done'' set it to 20d
        if req.time + 20 * 24 * 60 * 60 < now:
            util.clean_tmp(path.srpms_dir + req.id)
            return False
        return True
    q.requests = filter(leave_it, q.requests)
    q.write()
    q.dump(open(path.queue_stats_file, "w"))
    q.dump_html(open(path.queue_html_stats_file, "w"))
    # fix: 0o644 octal form is valid in Python 2.6+ and 3 (0644 is not)
    os.chmod(path.queue_html_stats_file, 0o644)
    os.chmod(path.queue_stats_file, 0o644)
    q.unlock()
def handle_request(f):
    """Verify the GPG-signed request read from *f* and dispatch it.

    Unknown signers are rejected; unknown request kinds are reported
    back to the sender by mail.
    """
    sio = StringIO.StringIO()
    util.sendfile(f, sio)
    sio.seek(0)
    (em, body) = gpg.verify_sig(sio)
    user = acl.user_by_email(em)
    if user is None:
        # FIXME: security email here
        log.alert("invalid signature, or not in acl %s" % em)
        return
    acl.set_current_user(user)
    status.push("email from %s" % user.login)
    r = request.parse_request(body)
    if r.kind == 'group':
        handle_group(r, user)
    elif r.kind == 'notification':
        handle_notification(r, user)
    else:
        msg = "%s: don't know how to handle requests of this kind '%s'" \
            % (user.get_login(), r.kind)
        log.alert(msg)
        m = user.message_to()
        m.set_headers(subject = "unknown request")
        m.write_line(msg)
        m.send()
    status.pop()
def main():
    """Entry point: process one e-mailed request from stdin."""
    init_conf("src")
    status.push("handling email request")
    handle_request(sys.stdin)
    status.pop()
    sys.exit(0)
if __name__ == '__main__':
    wrap.wrap(main)
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import sys
import os
import atexit
# Monkey-patch socket.socket so that every socket created anywhere in the
# process gets SO_KEEPALIVE enabled (long-lived HTTP transfers otherwise
# hang silently on dead peers).  The original constructor is stashed on
# the module under `myorigsocket`.
socket.myorigsocket = socket.socket

def mysocket(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
    """Drop-in replacement for socket.socket that sets SO_KEEPALIVE."""
    sock = socket.myorigsocket(family, type, proto)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    return sock

socket.socket = mysocket
# *HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*
# this code is duplicated in srpm_builder, but we
# might want to handle some cases differently here
def pick_request(q):
    """Sort q.requests by (priority, time) ascending, in place, and
    return the first (most urgent, oldest) request.

    Raises Exception if any queued request is not a group.
    """
    # fix: the sanity check used to live inside a cmp-comparator, so it
    # was skipped entirely for single-element queues; check explicitly.
    # (Also: string exceptions and cmp-comparators are gone in modern
    # Python; the key-tuple sort orders identically.)
    for r in q.requests:
        if r.kind != 'group':
            raise Exception("non-group requests")
    q.requests.sort(key=lambda r: (r.priority, r.time))
    return q.requests[0]
def fetch_src(r, b):
    """Download batch *b*'s source rpm for request *r* into the chroot.

    Retries forever on transient network errors, then logs the transfer
    size and speed to the batch log.
    """
    src_url = config.control_url + "/srpms/" + r.id + "/" + b.src_rpm
    b.log_line("fetching %s" % src_url)
    start = time.time()
    good = False
    while not good:
        try:
            good = True
            f = urllib.urlopen(src_url)
        # fix: `except IOError, error` is Python-2-only; the `as` form
        # works in 2.6+ as well
        except IOError as error:
            # transient errnos: 60/110 ETIMEDOUT, 61/111 ECONNREFUSED
            # (BSD/Linux), -3 temporary failure in name resolution
            if error[1][0] in (60, 110, -3, 111, 61):
                good = False
                b.log_line("unable to connect... trying again")
            else:
                f = urllib.urlopen(src_url) # So we get the exception logged :)

    # stream the download straight into the chroot via `cat`
    o = chroot.popen("cat > %s" % b.src_rpm, mode = "w")
    bytes = util.sendfile(f, o)
    f.close()
    o.close()
    t = time.time() - start
    if t == 0:
        b.log_line("fetched %d bytes" % bytes)
    else:
        b.log_line("fetched %d bytes, %.1f K/s" % (bytes, bytes / 1024.0 / t))
def build_rpm(r, b):
- status.push("building %s" % b.spec)
- b.log_line("request from: %s" % r.requester)
- b.log_line("started at: %s" % time.asctime())
- fetch_src(r, b)
- b.log_line("installing srpm: %s" % b.src_rpm)
- res = chroot.run("rpm -U %s" % b.src_rpm, logfile = b.logfile)
- chroot.run("rm -f %s" % b.src_rpm, logfile = b.logfile)
- b.files = []
- tmpdir = "/tmp/B." + b.b_id[0:6]
- if res:
- b.log_line("error: installing src rpm failed")
- res = 1
- else:
- chroot.run("install -m 700 -d %s" % tmpdir)
- rpmbuild_opt = "%s --target %s-pld-linux" % (b.bconds_string(), config.arch)
- cmd = "cd rpm/SPECS; TMPDIR=%s nice -n %s rpmbuild -bb %s %s" % \
- (tmpdir, config.nice, rpmbuild_opt, b.spec)
- if ("no-install-br" not in r.flags) and install_br.install_br(r, b):
- res = 1
+ status.push("building %s" % b.spec)
+ b.log_line("request from: %s" % r.requester)
+ b.log_line("started at: %s" % time.asctime())
+ fetch_src(r, b)
+ b.log_line("installing srpm: %s" % b.src_rpm)
+ res = chroot.run("rpm -U %s" % b.src_rpm, logfile = b.logfile)
+ chroot.run("rm -f %s" % b.src_rpm, logfile = b.logfile)
+ b.files = []
+ tmpdir = "/tmp/B." + b.b_id[0:6]
+ if res:
+ b.log_line("error: installing src rpm failed")
+ res = 1
else:
- b.log_line("building RPM using: %s" % cmd)
- res = chroot.run(cmd, logfile = b.logfile)
- files = util.collect_files(b.logfile)
- if len(files) > 0:
- r.chroot_files.extend(files)
- else:
- b.log_line("error: No files produced.")
- res = 1 # FIXME: is it error?
- b.files = files
- chroot.run("rm -rf %s; cd rpm/SPECS; rpmbuild --nodeps --nobuild " \
- "--clean --rmspec --rmsource %s" % \
- (tmpdir, b.spec), logfile = b.logfile)
- chroot.run("rm -rf $HOME/rpm/BUILD/*")
-
- def ll(l):
- util.append_to(b.logfile, l)
+ chroot.run("install -m 700 -d %s" % tmpdir)
+ rpmbuild_opt = "%s --target %s-pld-linux" % (b.bconds_string(), config.arch)
+ cmd = "cd rpm/SPECS; TMPDIR=%s nice -n %s rpmbuild -bb %s %s" % \
+ (tmpdir, config.nice, rpmbuild_opt, b.spec)
+ if ("no-install-br" not in r.flags) and install_br.install_br(r, b):
+ res = 1
+ else:
+ b.log_line("building RPM using: %s" % cmd)
+ res = chroot.run(cmd, logfile = b.logfile)
+ files = util.collect_files(b.logfile)
+ if len(files) > 0:
+ r.chroot_files.extend(files)
+ else:
+ b.log_line("error: No files produced.")
+            res = 1 # FIXME: is it an error?
+ b.files = files
+ chroot.run("rm -rf %s; cd rpm/SPECS; rpmbuild --nodeps --nobuild " \
+ "--clean --rmspec --rmsource %s" % \
+ (tmpdir, b.spec), logfile = b.logfile)
+ chroot.run("rm -rf $HOME/rpm/BUILD/*")
+
+ def ll(l):
+ util.append_to(b.logfile, l)
- if b.files != []:
- chroot.run("cp -f %s /spools/ready/; poldek --nodiff --mkidxz -s /spools/ready/" % \
- string.join(b.files), logfile = b.logfile, user = "root")
- ll("Begin-PLD-Builder-Info")
- if "upgrade" in r.flags:
- upgrade.upgrade_from_batch(r, b)
- else:
- ll("not upgrading")
- ll("End-PLD-Builder-Info")
+ if b.files != []:
+ chroot.run("cp -f %s /spools/ready/; poldek --nodiff --mkidxz " \
+ "-s /spools/ready/" % \
+ string.join(b.files), logfile = b.logfile, user = "root")
+ ll("Begin-PLD-Builder-Info")
+ if "upgrade" in r.flags:
+ upgrade.upgrade_from_batch(r, b)
+ else:
+ ll("not upgrading")
+ ll("End-PLD-Builder-Info")
- for f in b.files:
- local = r.tmp_dir + os.path.basename(f)
- chroot.run("cat %s; rm -f %s" % (f, f), logfile = local)
- ftp.add(local)
-
- def uploadinfo(b):
- c="file:SRPMS:%s\n" % b.src_rpm
for f in b.files:
- c=c + "file:ARCH:%s\n" % os.path.basename(f)
- c=c + "END\n"
- return c
-
- if b.files != []:
- fname = r.tmp_dir + b.src_rpm + ".uploadinfo"
- f = open(fname, "w")
- f.write(uploadinfo(b))
- f.close()
- ftp.add(fname, "uploadinfo")
+ local = r.tmp_dir + os.path.basename(f)
+ chroot.run("cat %s; rm -f %s" % (f, f), logfile = local)
+ ftp.add(local)
- status.pop()
+ def uploadinfo(b):
+ c="file:SRPMS:%s\n" % b.src_rpm
+ for f in b.files:
+ c=c + "file:ARCH:%s\n" % os.path.basename(f)
+ c=c + "END\n"
+ return c
- return res
+ if b.files != []:
+ fname = r.tmp_dir + b.src_rpm + ".uploadinfo"
+ f = open(fname, "w")
+ f.write(uploadinfo(b))
+ f.close()
+ ftp.add(fname, "uploadinfo")
+
+ status.pop()
+
+ return res
def handle_request(r):
- ftp.init(r)
- buildlogs.init(r)
- build.build_all(r, build_rpm)
- report.send_report(r, is_src = False)
- ftp.flush()
- notify.send()
+ ftp.init(r)
+ buildlogs.init(r)
+ build.build_all(r, build_rpm)
+ report.send_report(r, is_src = False)
+ ftp.flush()
+ notify.send()
def check_load():
- do_exit = 0
- try:
- f = open("/proc/loadavg")
- if float(string.split(f.readline())[2]) > config.max_load:
- do_exit = 1
- except:
- pass
- if do_exit:
- sys.exit(0)
+ do_exit = 0
+ try:
+ f = open("/proc/loadavg")
+ if float(string.split(f.readline())[2]) > config.max_load:
+ do_exit = 1
+ except:
+ pass
+ if do_exit:
+ sys.exit(0)
def main_for(builder):
- init_conf(builder)
- # allow only one build in given builder at once
- if not lock.lock("building-rpm-for-%s" % config.builder, non_block = 1):
- return
- # don't kill server
- check_load()
- # not more then job_slots builds at once
- locked = 0
- for slot in range(config.job_slots):
- if lock.lock("building-rpm-slot-%d" % slot, non_block = 1):
- locked = 1
- break
- if not locked:
- return
-
- status.push("picking request for %s" % config.builder)
- q = B_Queue(path.queue_file + "-" + config.builder)
- q.lock(0)
- q.read()
- if q.requests == []:
+ init_conf(builder)
+ # allow only one build in given builder at once
+ if not lock.lock("building-rpm-for-%s" % config.builder, non_block = 1):
+ return
+ # don't kill server
+ check_load()
+    # not more than job_slots builds at once
+ locked = 0
+ for slot in range(config.job_slots):
+ if lock.lock("building-rpm-slot-%d" % slot, non_block = 1):
+ locked = 1
+ break
+ if not locked:
+ return
+
+ status.push("picking request for %s" % config.builder)
+ q = B_Queue(path.queue_file + "-" + config.builder)
+ q.lock(0)
+ q.read()
+ if q.requests == []:
+ q.unlock()
+ return
+ req = pick_request(q)
q.unlock()
- return
- req = pick_request(q)
- q.unlock()
- status.pop()
-
- # record fact that we got lock for this builder, load balancer
- # will use it for fair-queuing
- l = lock.lock("got-lock")
- f = open(path.got_lock_file, "a")
- f.write(config.builder + "\n")
- f.close()
- l.close()
-
- msg = "handling request %s (%d) for %s from %s" \
- % (req.id, req.no, config.builder, req.requester)
- log.notice(msg)
- status.push(msg)
- handle_request(req)
- status.pop()
-
- def otherreqs(r):
- if r.no==req.no:
- return False
- else:
- return True
-
- q = B_Queue(path.queue_file + "-" + config.builder)
- q.lock(0)
- q.read()
- previouslen=len(q.requests)
- q.requests=filter(otherreqs, q.requests)
- if len(q.requests)<previouslen:
- q.write()
- q.unlock()
-
+ status.pop()
+
+ # record fact that we got lock for this builder, load balancer
+ # will use it for fair-queuing
+ l = lock.lock("got-lock")
+ f = open(path.got_lock_file, "a")
+ f.write(config.builder + "\n")
+ f.close()
+ l.close()
+
+ msg = "handling request %s (%d) for %s from %s" \
+ % (req.id, req.no, config.builder, req.requester)
+ log.notice(msg)
+ status.push(msg)
+ handle_request(req)
+ status.pop()
+
+ def otherreqs(r):
+ if r.no==req.no:
+ return False
+ else:
+ return True
+
+ q = B_Queue(path.queue_file + "-" + config.builder)
+ q.lock(0)
+ q.read()
+ previouslen=len(q.requests)
+ q.requests=filter(otherreqs, q.requests)
+ if len(q.requests)<previouslen:
+ q.write()
+ q.unlock()
+
def main():
- if len(sys.argv) < 2:
- raise "fatal: need to have builder name as first arg"
- return main_for(sys.argv[1])
-
+ if len(sys.argv) < 2:
+ raise "fatal: need to have builder name as first arg"
+ return main_for(sys.argv[1])
+
if __name__ == '__main__':
- loop.run_loop(main)
+ loop.run_loop(main)
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import email
import string
import time
from config import config, init_conf
def pick_request(q):
- def mycmp(r1, r2):
- if r1.kind != 'group' or r2.kind != 'group':
- raise "non-group requests"
- pri_diff = cmp(r1.priority, r2.priority)
- if pri_diff == 0:
- return cmp(r1.time, r2.time)
- else:
- return pri_diff
- q.requests.sort(mycmp)
- ret = q.requests[0]
- q.requests = q.requests[1:]
- return ret
-
+ def mycmp(r1, r2):
+ if r1.kind != 'group' or r2.kind != 'group':
+ raise "non-group requests"
+ pri_diff = cmp(r1.priority, r2.priority)
+ if pri_diff == 0:
+ return cmp(r1.time, r2.time)
+ else:
+ return pri_diff
+ q.requests.sort(mycmp)
+ ret = q.requests[0]
+ q.requests = q.requests[1:]
+ return ret
+
def store_binary_request(r):
- new_b = []
- for b in r.batches:
- if not b.build_failed: new_b.append(b)
- if new_b == []:
- return
- r.batches = new_b
- # store new queue and max_req_no for binary builders
- cnt_f = open(path.max_req_no_file, "r+")
- num = int(string.strip(cnt_f.read())) + 1
- r.no = num
- q = B_Queue(path.req_queue_file)
- q.lock(0)
- q.read()
- q.add(r)
- q.write()
- q.dump(open(path.queue_stats_file, "w"))
- q.dump_html(open(path.queue_html_stats_file, "w"))
- os.chmod(path.queue_stats_file, 0644)
- os.chmod(path.queue_html_stats_file, 0644)
- q.write_signed(path.req_queue_signed_file)
- os.chmod(path.req_queue_signed_file, 0644)
- q.unlock()
- cnt_f.seek(0)
- cnt_f.write("%d\n" % num)
- cnt_f.close()
- os.chmod(path.max_req_no_file, 0644)
+ new_b = []
+ for b in r.batches:
+ if not b.build_failed: new_b.append(b)
+ if new_b == []:
+ return
+ r.batches = new_b
+ # store new queue and max_req_no for binary builders
+ cnt_f = open(path.max_req_no_file, "r+")
+ num = int(string.strip(cnt_f.read())) + 1
+ r.no = num
+ q = B_Queue(path.req_queue_file)
+ q.lock(0)
+ q.read()
+ q.add(r)
+ q.write()
+ q.dump(open(path.queue_stats_file, "w"))
+ q.dump_html(open(path.queue_html_stats_file, "w"))
+ os.chmod(path.queue_stats_file, 0644)
+ os.chmod(path.queue_html_stats_file, 0644)
+ q.write_signed(path.req_queue_signed_file)
+ os.chmod(path.req_queue_signed_file, 0644)
+ q.unlock()
+ cnt_f.seek(0)
+ cnt_f.write("%d\n" % num)
+ cnt_f.close()
+ os.chmod(path.max_req_no_file, 0644)
def transfer_file(r, b):
- local = path.srpms_dir + r.id + "/" + b.src_rpm
- f = b.src_rpm_file
- # export files from chroot
- chroot.run("cat %s; rm -f %s" % (f, f), logfile = local)
- os.chmod(local, 0644)
- ftp.add(local)
+ local = path.srpms_dir + r.id + "/" + b.src_rpm
+ f = b.src_rpm_file
+ # export files from chroot
+ chroot.run("cat %s; rm -f %s" % (f, f), logfile = local)
+ os.chmod(local, 0644)
+ ftp.add(local)
- fname = path.srpms_dir + r.id + "/" + b.src_rpm + ".uploadinfo"
- f = open(fname, "w")
- f.write("info:build:%s:requester:%s\ninfo:build:%s:requester_email:%s\nfile:SRPMS:%s\nEND\n" % (b.gb_id, b.requester, b.gb_id, b.requester_email, b.src_rpm))
- f.close()
- ftp.add(fname, "uploadinfo")
+ fname = path.srpms_dir + r.id + "/" + b.src_rpm + ".uploadinfo"
+ f = open(fname, "w")
+ f.write("info:build:%s:requester:%s\ninfo:build:%s:requester_email:%s\nfile:SRPMS:%s\nEND\n" % (b.gb_id, b.requester, b.gb_id, b.requester_email, b.src_rpm))
+ f.close()
+ ftp.add(fname, "uploadinfo")
def build_srpm(r, b):
- status.push("building %s" % b.spec)
- b.src_rpm = ""
- builder_opts = "-nu --nodeps"
- if b.branch and b.branch.startswith(config.tag_prefixes[0]):
- tag_test=""
- else:
- tag_test=" -Tp %s -tt" % (config.tag_prefixes[0],)
- cmd = ( "cd rpm/SPECS; nice -n %s ./builder %s -bs %s -r %s %s %s 2>&1" %
- (config.nice, builder_opts, b.bconds_string(), b.branch,
- tag_test, b.spec) )
- util.append_to(b.logfile, "request from: %s" % r.requester)
- util.append_to(b.logfile, "started at: %s" % time.asctime())
- util.append_to(b.logfile, "building SRPM using: %s\n" % cmd)
- res = chroot.run(cmd, logfile = b.logfile)
- util.append_to(b.logfile, "exit status %d" % res)
- files = util.collect_files(b.logfile)
- if len(files) > 0:
- if len(files) > 1:
- util.append_to(b.logfile, "error: More then one file produced: %s" % files)
- res = 1
- last = files[len(files) - 1]
- b.src_rpm_file = last
- b.src_rpm = os.path.basename(last)
- r.chroot_files.extend(files)
- else:
- util.append_to(b.logfile, "error: No files produced.")
- res = 1
- if res == 0:
- transfer_file(r, b)
- if res == 0:
- for pref in config.tag_prefixes:
- util.append_to(b.logfile, "tagging: %s" % pref)
- chroot.run("cd rpm/SPECS; ./builder -r %s -Tp %s -Tv %s" % \
- (b.branch, pref, b.spec), logfile = b.logfile)
- chroot.run("cd rpm/SPECS; rpmbuild --nodeps --nobuild " \
- "--clean --rmspec --rmsource %s" % \
- b.spec, logfile = b.logfile)
- status.pop()
- return res
+ status.push("building %s" % b.spec)
+ b.src_rpm = ""
+ builder_opts = "-nu --nodeps"
+ if b.branch and b.branch.startswith(config.tag_prefixes[0]):
+ tag_test=""
+ else:
+ tag_test=" -Tp %s -tt" % (config.tag_prefixes[0],)
+ cmd = ("cd rpm/SPECS; nice -n %s ./builder %s -bs %s -r %s %s %s 2>&1" %
+ (config.nice, builder_opts, b.bconds_string(), b.branch,
+ tag_test, b.spec))
+ util.append_to(b.logfile, "request from: %s" % r.requester)
+ util.append_to(b.logfile, "started at: %s" % time.asctime())
+ util.append_to(b.logfile, "building SRPM using: %s\n" % cmd)
+ res = chroot.run(cmd, logfile = b.logfile)
+ util.append_to(b.logfile, "exit status %d" % res)
+ files = util.collect_files(b.logfile)
+ if len(files) > 0:
+ if len(files) > 1:
+ util.append_to(b.logfile, "error: More then one file produced: %s" % files)
+ res = 1
+ last = files[len(files) - 1]
+ b.src_rpm_file = last
+ b.src_rpm = os.path.basename(last)
+ r.chroot_files.extend(files)
+ else:
+ util.append_to(b.logfile, "error: No files produced.")
+ res = 1
+ if res == 0:
+ transfer_file(r, b)
+ if res == 0:
+ for pref in config.tag_prefixes:
+ util.append_to(b.logfile, "tagging: %s" % pref)
+ chroot.run("cd rpm/SPECS; ./builder -r %s -Tp %s -Tv %s" % \
+ (b.branch, pref, b.spec), logfile = b.logfile)
+ chroot.run("cd rpm/SPECS; rpmbuild --nodeps --nobuild " \
+ "--clean --rmspec --rmsource %s" % \
+ b.spec, logfile = b.logfile)
+ status.pop()
+ return res
def handle_request(r):
- os.mkdir(path.srpms_dir + r.id)
- os.chmod(path.srpms_dir + r.id, 0755)
- ftp.init(r)
- buildlogs.init(r)
- build.build_all(r, build_srpm)
- report.send_report(r, is_src = True)
- report.send_cia_report(r, is_src = True)
- store_binary_request(r)
- ftp.flush()
+ os.mkdir(path.srpms_dir + r.id)
+ os.chmod(path.srpms_dir + r.id, 0755)
+ ftp.init(r)
+ buildlogs.init(r)
+ build.build_all(r, build_srpm)
+ report.send_report(r, is_src = True)
+ report.send_cia_report(r, is_src = True)
+ store_binary_request(r)
+ ftp.flush()
def main():
- init_conf("src")
- if lock("building-srpm", non_block = 1) == None:
- return
- status.push("srpm: processing queue")
- q = B_Queue(path.queue_file)
- if not q.lock(1): return
- q.read()
- if q.requests == []: return
- r = pick_request(q)
- q.write()
- q.unlock()
- status.pop()
- status.push("srpm: handling request from %s" % r.requester)
- handle_request(r)
- status.pop()
+ init_conf("src")
+ if lock("building-srpm", non_block = 1) == None:
+ return
+ status.push("srpm: processing queue")
+ q = B_Queue(path.queue_file)
+ if not q.lock(1): return
+ q.read()
+ if q.requests == []: return
+ r = pick_request(q)
+ q.write()
+ q.unlock()
+ status.pop()
+ status.push("srpm: handling request from %s" % r.requester)
+ handle_request(r)
+ status.pop()
if __name__ == '__main__':
- loop.run_loop(main)
+ loop.run_loop(main)
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
state = []
email = ""
admin = ""
builder_list = ""
def push(s):
- state.append(s)
+ state.append(s)
def pop():
- state.pop()
+ state.pop()
def get():
- return "%s" % state
+ return "%s" % state
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import time
import resource
class Time:
- def __init__(self):
- x = resource.getrusage(resource.RUSAGE_CHILDREN)
- self.user_time = x[0]
- self.sys_time = x[1]
- self.non_io_faults = x[6]
- self.io_faults = x[7]
- self.time = time.time()
-
- def sub(self, x):
- self.user_time -= x.user_time
- self.sys_time -= x.sys_time
- self.non_io_faults -= x.non_io_faults
- self.io_faults -= x.io_faults
- self.time -= x.time
-
- def format(self):
- return "user:%.2fs sys:%.2fs real:%.2fs (faults io:%d non-io:%d)" % \
+ def __init__(self):
+ x = resource.getrusage(resource.RUSAGE_CHILDREN)
+ self.user_time = x[0]
+ self.sys_time = x[1]
+ self.non_io_faults = x[6]
+ self.io_faults = x[7]
+ self.time = time.time()
+
+ def sub(self, x):
+ self.user_time -= x.user_time
+ self.sys_time -= x.sys_time
+ self.non_io_faults -= x.non_io_faults
+ self.io_faults -= x.io_faults
+ self.time -= x.time
+
+ def format(self):
+ return "user:%.2fs sys:%.2fs real:%.2fs (faults io:%d non-io:%d)" % \
(self.user_time, self.sys_time, self.time, self.io_faults,
self.non_io_faults)
-
+
class Timer:
- def __init__(self):
- self.starts = []
+ def __init__(self):
+ self.starts = []
- def start(self):
- self.starts.append(Time())
+ def start(self):
+ self.starts.append(Time())
- def stop(self):
- tmp = Time()
- tmp.sub(self.starts.pop())
- return tmp.format()
+ def stop(self):
+ tmp = Time()
+ tmp.sub(self.starts.pop())
+ return tmp.format()
t = Timer()
def start():
- t.start()
+ t.start()
def stop():
- return t.stop()
+ return t.stop()
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import re
import string
import log
hold = [
- 'poldek',
- 'rpm-build'
+ 'poldek',
+ 'rpm-build'
]
def close_killset(killset):
- k = killset.keys()
- rx = re.compile(r' marks ([^\s]+)-[^-]+-[^-]+$')
- errors = ""
- for p in k:
- if p in hold:
- del killset[p]
- errors += "cannot remove %s because it's crucial\n" % p
- else:
- f = chroot.popen("poldek --noask --test --erase %s" % p, user = "root")
- crucial = 0
- e = []
- for l in f.xreadlines():
- m = rx.search(l)
- if m:
- pkg = m.group(1)
- if pkg in hold:
- errors += "cannot remove %s because it's required by %s, that is crucial\n" % \
- (p, pkg)
- crucial = 1
- e.append(pkg)
- f.close()
- if crucial:
- del killset[p]
- else:
- for p in e:
- killset[p] = 2
- return errors
+ k = killset.keys()
+ rx = re.compile(r' marks ([^\s]+)-[^-]+-[^-]+$')
+ errors = ""
+ for p in k:
+ if p in hold:
+ del killset[p]
+ errors += "cannot remove %s because it's crucial\n" % p
+ else:
+ f = chroot.popen("poldek --noask --test --erase %s" % p, user = "root")
+ crucial = 0
+ e = []
+ for l in f.xreadlines():
+ m = rx.search(l)
+ if m:
+ pkg = m.group(1)
+ if pkg in hold:
+ errors += "cannot remove %s because it's required " \
+ "by %s, that is crucial\n" % (p, pkg)
+ crucial = 1
+ e.append(pkg)
+ f.close()
+ if crucial:
+ del killset[p]
+ else:
+ for p in e:
+ killset[p] = 2
+ return errors
def upgrade_from_batch(r, b):
- f = chroot.popen("rpm --test -F %s 2>&1" % string.join(b.files), user = "root")
- killset = {}
- rx = re.compile(r' ([^\s]+)-[^-]+-[^-]+$')
- for l in f.xreadlines():
- m = rx.search(l)
- if m: killset[m.group(1)] = 1
- f.close()
- if len(killset) != 0:
- err = close_killset(killset)
- if err != "":
- util.append_to(b.logfile, err)
- log.notice("cannot upgrade rpms")
- return
- k = string.join(killset.keys())
- if 0:
- b.log_line("removing %s" % k)
- res = chroot.run("rpm -e %s" % k, logfile = b.logfile, user = "root")
- if res != 0:
- b.log_line("package removal failed")
+ f = chroot.popen("rpm --test -F %s 2>&1" % string.join(b.files), user = "root")
+ killset = {}
+ rx = re.compile(r' ([^\s]+)-[^-]+-[^-]+$')
+ for l in f.xreadlines():
+ m = rx.search(l)
+ if m: killset[m.group(1)] = 1
+ f.close()
+ if len(killset) != 0:
+ err = close_killset(killset)
+ if err != "":
+ util.append_to(b.logfile, err)
+ log.notice("cannot upgrade rpms")
+ return
+ k = string.join(killset.keys())
+ if 0:
+ b.log_line("removing %s" % k)
+ res = chroot.run("rpm -e %s" % k, logfile = b.logfile, user = "root")
+ if res != 0:
+ b.log_line("package removal failed")
+ return
+ else:
+ b.log_line("upgrade would need removal of %s" % k)
+ return
+ b.log_line("upgrading packages")
+ res = chroot.run("rpm -Fvh %s" % string.join(b.files), user = "root")
+ if res != 0:
+ b.log_line("package upgrade failed")
return
- else:
- b.log_line("upgrade would need removal of %s" % k)
- return
- b.log_line("upgrading packages")
- res = chroot.run("rpm -Fvh %s" % string.join(b.files), user = "root")
- if res != 0:
- b.log_line("package upgrade failed")
- return
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import re
import sys
import os
import string
def pkg_name(nvr):
- return re.match(r"(.+)-[^-]+-[^-]+", nvr).group(1)
-
+ return re.match(r"(.+)-[^-]+-[^-]+", nvr).group(1)
+
def msg(m):
- sys.stderr.write(m)
+ sys.stderr.write(m)
def sendfile(src, dst):
- cnt = 0
- while 1:
- s = src.read(10000)
- if s == "": break
- cnt += len(s)
- dst.write(s)
- return cnt
+ cnt = 0
+ while 1:
+ s = src.read(10000)
+ if s == "": break
+ cnt += len(s)
+ dst.write(s)
+ return cnt
def append_to(log, msg):
- f = open(log, "a")
- f.write("%s\n" % msg)
- f.close()
+ f = open(log, "a")
+ f.write("%s\n" % msg)
+ f.close()
def clean_tmp(dir):
- # FIXME: use python
- os.system("rm -f %s/* 2>/dev/null; rmdir %s 2>/dev/null" % (dir, dir))
+ # FIXME: use python
+ os.system("rm -f %s/* 2>/dev/null; rmdir %s 2>/dev/null" % (dir, dir))
def uuid():
- f = os.popen("uuidgen 2>&1")
- u = string.strip(f.read())
- f.close()
- if len(u) != 36:
- raise "uuid: fatal, cannot generate uuid: %s" % u
- return u
+ f = os.popen("uuidgen 2>&1")
+ u = string.strip(f.read())
+ f.close()
+ if len(u) != 36:
+ raise "uuid: fatal, cannot generate uuid: %s" % u
+ return u
def collect_files(log):
- f = open(log)
- rx = re.compile(r"^Wrote: (/home.*\.rpm)$")
- files = []
- for l in f.xreadlines():
- m = rx.search(l)
- if m:
- files.append(m.group(1))
- return files
+ f = open(log)
+ rx = re.compile(r"^Wrote: (/home.*\.rpm)$")
+ files = []
+ for l in f.xreadlines():
+ m = rx.search(l)
+ if m:
+ files.append(m.group(1))
+ return files
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
import sys
import log
import traceback
import status
def wrap(main):
- try:
- main()
- except:
- exctype, value = sys.exc_info()[:2]
- if exctype == SystemExit:
- sys.exit(value)
- s = StringIO.StringIO()
- traceback.print_exc(file = s, limit = 20)
- log.alert("fatal python exception")
- log.alert(s.getvalue())
- log.alert("during: %s" % status.get())
-
- # don't use mailer.py; it safer this way
- f = os.popen("/usr/sbin/sendmail -t", "w")
- f.write("""Subject: builder failure
+ try:
+ main()
+ except:
+ exctype, value = sys.exc_info()[:2]
+ if exctype == SystemExit:
+ sys.exit(value)
+ s = StringIO.StringIO()
+ traceback.print_exc(file = s, limit = 20)
+ log.alert("fatal python exception")
+ log.alert(s.getvalue())
+ log.alert("during: %s" % status.get())
+
+    # don't use mailer.py; it's safer this way
+ f = os.popen("/usr/sbin/sendmail -t", "w")
+ f.write("""Subject: builder failure
To: %s
Cc: %s, %s
Date: %s
%s
during: %s
-""" % (status.admin, status.email, status.builder_list, \
- time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()), \
- s.getvalue(), status.get()))
- f.close()
+""" % (status.admin, status.email, status.builder_list,
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
+ s.getvalue(), status.get()))
+ f.close()
- sys.exit(1)
+ sys.exit(1)