self.name = filename
self.requests = []
self.fd = None
+
+ def dump(self, f):
+ for r in self.requests:
+ r.dump(f)
# read possibly compressed, signed queue
def read_signed(self):
id = util.uuid()
os.system("bzip2 --best --force < %s > %s" \
% (logfile, path.buildlogs_queue_dir + id))
+
+ if l['failed']: s = "FAIL"
+ else: s = "OK"
+ f = open(path.buildlogs_queue_dir + id + ".info", "w")
+ f.write("Status: %s\nEND\n" % s)
+ f.close()
+
self.queue.append({'name': name, 'id': id, 'failed': failed})
def flush(self):
def desc(l):
- if l['failed']: s = "FAIL"
- elif self.some_failed: s = "OKOF" # OK but Others Failed
- else: s = "OK"
return """Target: %s/%s
Builder: %s
-Status: %s
-Store-desc: yes
Time: %d
Requester: %s
END
-""" % (config.buildlogs_url, l['name'], config.builder, s, time.time(), acl.current_user)
+""" % (config.buildlogs_url, l['name'], config.builder, time.time(), acl.current_user)
for l in self.queue:
f = open(path.buildlogs_queue_dir + l['id'] + ".desc", "w")
else:
log.panic("cannot find %s::%s" % (builder, o))
+ if builder == "src":
+ self.builder = get("src_builder", builder)
+ else
+ self.builder = builder
+
p.readfp(open(path.builder_conf))
self.builders = string.split(get("builders"))
self.control_url = get("control_url")
self.queue_signed_by = get("queue_signed_by")
+ self.notify_email = get("notify_email")
self.admin_email = get("admin_email")
self.email = self.admin_email
- self.builder = builder
if builder == "all":
return
log.builder = builder
if builder == "": builder = "all"
config.read(builder)
+ log.builder = config.builder
status.pop()
if send_file(d['_file'], d['Target']):
error = d
break
- if d.has_key('Store-desc') and d['Store-desc'] == "yes":
- if send_file(d['_desc'], d['Target'] + ".desc"):
+ if os.access(d['_file'] + ".info", os.F_OK):
+ if send_file(d['_file'] + ".info", d['Target'] + ".info"):
error = d
break
os.unlink(d['_file'])
return (emails, body)
def sign(buf):
- (gpg_out, gpg_in, gpg_err) = popen2.popen3("gpg --clearsign --default-key builder")
+ (gpg_out, gpg_in, gpg_err) = popen2.popen3("gpg --clearsign")
util.sendfile(buf, gpg_in)
gpg_in.close()
body = StringIO.StringIO()
import sys
+import time
+
+import path
builder = ""
def log(s):
    # Append a timestamped entry to the shared builder log file.
    # BUGFIX: `f` is a plain file object -- it has no `stderr`
    # attribute; write directly to it.
    f = open(path.log_file, "a")
    f.write("%s [%s]: %s\n" % (time.asctime(), builder, s))
    f.close()
def alert(s):
    # Convenience wrapper: record s as an alert-level log entry.
    log("alert: %s" % s)
util.sendfile(self.body, f)
def send(self):
- # FIXME
- self.write_to(sys.stdout)
+ f = os.popen("/usr/sbin/sendmail -t")
+ self.write_to(f)
+ f.close()
--- /dev/null
import StringIO

import config
import mailer
import gpg

class Notifier:
    """Collects per-batch build statuses for one request group and
    mails them to the src builder as a signed XML <notification>."""

    def __init__(self, g):
        self.xml = StringIO.StringIO()
        self.xml.write("<notification group-id='%s' builder='%s'>\n" % \
                (g.id, config.builder))

    def send(self):
        # BUGFIX: was `def send():` -- missing self.
        self.xml.write("</notification>\n")
        msg = mailer.Message()
        msg.set_headers(to = config.notify_email, subject = "status notification")
        msg.write(gpg.sign(self.xml))
        msg.send()
        self.xml = None

    def add_batch(self, b, s):
        # BUGFIX: was `def add_batch(b, s):` -- missing self.
        # Self-closing element: an unclosed <batch> would make the
        # document unparsable on the receiving side.
        self.xml.write(" <batch id='%s' status='%s'/>\n" % (b.b_id, s))

# Module-level notifier for the request group currently being built.
n = None

def begin(group):
    # BUGFIX: without `global` this bound a throwaway local and left
    # module-level n as None for add_batch()/send().
    global n
    n = Notifier(group)

def add_batch(batch, status):
    n.add_batch(batch, status)

def send():
    n.send()
ftp_queue_dir = spool_dir + "ftp/"
last_req_no_file = spool_dir + "last_req_no"
got_lock_file = spool_dir + "got_lock"
+log_file = spool_dir + "log"
# www/
srpms_dir = www_dir + "srpms/"
req_queue_signed_file = www_dir + "queue.gz"
max_req_no_file = www_dir + "max_req_no"
+queue_state_file = www_dir + "queue.txt"
import util
import chroot
from acl import acl
+import notify
__all__ = ['parse_request', 'parse_requests']
raise "xml: dependency not found in group"
b.depends_on = deps
- def dump(self):
- print "group: %s @%d" % (self.id, self.priority)
- print " from: %s" % self.requester
- print " time: %s" % time.asctime(time.localtime(self.time))
+ def dump(self, f):
+ f.write("group: %d (id=%s pri=%d)\n" % (self.no, self.id, self.priority))
+ f.write(" from: %s\n" % self.requester)
+ f.write(" time: %s\n" % time.asctime(time.localtime(self.time)))
for b in self.batches:
- b.dump()
+ b.dump(f)
def write_to(self, f):
f.write("""
f.write(" </group>\n\n")
def build_all(r, build_fnc):
+ notify.begin(r)
+
tmp = path.spool_dir + util.uuid() + "/"
r.tmp_dir = tmp
os.mkdir(tmp)
batch.build_failed = build_fnc(r, batch)
if batch.build_failed:
log.notice("building %s FAILED" % batch.spec)
+ notify.add_batch(batch, "FAIL")
else:
r.some_ok = 1
log.notice("building %s OK" % batch.spec)
+ notify.add_batch(batch, "OK")
else:
batch.build_failed = 1
batch.skip_reason = "SKIPED [%s failed]" % failed_dep
batch.logfile = None
log.notice("building %s %s" % (batch.spec, batch.skip_reason))
+ notify.add_batch(batch, "SKIP")
def clean_files(r):
chroot.run("rm -f %s" % string.join(r.chroot_files))
m.send()
+ def is_done(self):
+ ok = 1
+ for b in self.batches:
+ if not b.is_done():
+ ok = 0
+ return ok
+
class Batch:
def __init__(self, e):
self.bconds_with = []
self.branch = text(c)
elif c.nodeName == "builder":
self.builders.append(text(c))
+ self.builders_status[text(c)] = attr(c, "status", "?")
elif c.nodeName == "with":
self.bconds_with.append(text(c))
elif c.nodeName == "without":
self.bconds_without.append(text(c))
else:
raise "xml: evil batch child (%s)" % c.nodeName
-
- def dump(self):
- print " batch: %s/%s" % (self.src_rpm, self.spec)
- print " info: %s" % self.info
- print " branch: %s" % self.branch
- print " bconds: %s" % self.bconds_string()
- print " for: %s" % string.join(self.builders)
+
+ def is_done(self):
+ ok = 1
+ for b in self.builders:
+ s = self.builders_status[b]
+ if not (s == "OK" or s == "FAIL" or s == "SKIP"):
+ ok = 0
+ return ok
+
+ def dump(self, f):
+ f.write(" batch: %s/%s\n" % (self.src_rpm, self.spec))
+ f.write(" info: %s\n" % self.info)
+ f.write(" branch: %s\n" % self.branch)
+ f.write(" bconds: %s\n" % self.bconds_string())
+ builders = []
+ for b in self.builders:
+ builders.append("%s:%s" % (b, self.builders_status[b]))
+ f.write(" builders: %s\n" % string.join(builders))
def bconds_string(self):
r = ""
for b in self.bconds_without:
f.write(" <without>%s</without>\n" % escape(b))
for b in self.builders:
- f.write(" <builder>%s</builder>\n" % escape(b))
+ f.write(" <builder status='%s'>%s</builder>\n" % \
+ (escape(self.builders_status[b]), escape(b)))
f.write(" </batch>\n")
class Notification:
    """A <notification> request: a status report from a binary builder.

    Carries a mapping of batch id -> status ("OK", "FAIL" or "SKIP")
    which apply_to() folds back into the queued groups.
    """
    def __init__(self, e):
        self.kind = 'notification'
        self.group_id = attr(e, "group-id")
        self.builder = attr(e, "builder")
        self.batches = {}
        for c in e.childNodes:
            if is_blank(c): continue
            if c.nodeType != Element.ELEMENT_NODE:
                raise "xml: evil notification child %d" % c.nodeType
            if c.nodeName == "batch":
                # BUGFIX: attributes must be read from the child
                # element c, not the toplevel element e.
                id = attr(c, "id")
                status = attr(c, "status")
                if status != "OK" and status != "FAIL" and status != "SKIP":
                    # BUGFIX: was self.status, which is never assigned
                    raise "xml notification: bad status: %s" % status
                self.batches[id] = status
            else:
                raise "xml: evil notification child (%s)" % c.nodeName

    def apply_to(self, q):
        # Record the reported statuses on matching batches of queued groups.
        for r in q.requests:
            if r.kind == "group":
                for b in r.batches:
                    if b.b_id in self.batches:
                        b.builders_status[self.builder] = self.batches[b.b_id]
+
def build_request(e):
    # Instantiate the request class matching a toplevel XML element.
    if e.nodeType != Element.ELEMENT_NODE:
        raise "xml: evil request element"
    kind = e.nodeName
    if kind == "group":
        return Group(e)
    elif kind == "notification":
        return Notification(e)
    elif kind == "command":
        # FIXME
        return Command(e)
q.write()
q.unlock()
def handle_notification(r, user):
    """Apply a binary builder's status notification to the request queue.

    Cleans srpms of groups that just became fully built and expires
    finished groups older than 4 days.
    """
    if not user.can_do("notify", r.builder):
        log.alert("user %s is not allowed to notify:%s" % (user.login, r.builder))
        # BUGFIX: without this return the notification was applied anyway
        return
    q = B_Queue(path.req_queue_file)
    q.lock(0)
    q.read()
    # Snapshot of groups not yet finished, taken BEFORE applying the
    # notification, so we can spot groups that just completed.
    # (also: py2-only `lambda (r):` syntax replaced, and the loop
    # variable renamed so it no longer shadows the notification r)
    not_fin = filter(lambda req: not req.is_done(), q.requests)
    r.apply_to(q)
    for req in not_fin:
        if req.is_done():
            clean_tmp(path.srpms_dir + req.id)
    now = time.time()
    def leave_it(req):
        # keep unfinished groups, and finished ones younger than 4 days
        return not req.is_done() or req.time + 4 * 24 * 60 * 60 > now
    q.requests = filter(leave_it, q.requests)
    q.write()
    f = open(path.queue_stats_file, "w")
    q.dump(f)
    f.close()
    q.unlock()
+
def handle_request(f):
sio = StringIO.StringIO()
util.sendfile(f, sio)
r = request.parse_request(body)
if r.kind == 'group':
handle_group(r, user)
+ elif r.kind == 'notification':
+ handle_notification(r, user)
else:
msg = "%s: don't know how to handle requests of this kind '%s'" \
% (user.get_login(), r.kind)
import chroot
import ftp
import buildlogs
+import notify
# this code is duplicated in srpm_builder, but we
# might want to handle some cases differently here
r.send_report()
buildlogs.flush()
ftp.flush()
+ notify.send()
def check_load():
try:
q.read()
q.add(r)
q.write()
- q.unlock()
+ q.dump(open(path.queue_stats_file, "w"))
q.write_signed(path.req_queue_signed_file)
+ q.unlock()
cnt_f.seek(0)
cnt_f.write("%d\n" % num)
cnt_f.close()
# shell wildcards.
#
# Actions:
-# src -- build src rpm (only makes sense in src:src)
+#	src -- build src rpm (only makes sense if <which-builder> is the
+#	       src builder)
# binary -- build binary rpm
#	notify -- may send notifications about the build process on the
#	          given builder. Used in entries for binary builders.
[malekith]
emails = malekith@roke.freak malekith@pld-linux.org
-privs = src:src binary:*
+privs = src:roke-src binary:roke-*
[all]
# builder email (in From:)
email = malekith@roke.freak
+admin_email = malekith@roke.freak
+
+# ------ Binary builders config:
# how much jobs can be run at once
job_slots = 1
# maximal load, at which rpm processing can start
max_load = 10.0
# where to look for queue.gz and counter
control_url = http://roke.freak/builder/
-# list of binary builders
-builders = athlon
# login of user (from acl.conf) that should sign queue
-queue_signed_by = malekith
-#
-admin_email = malekith@roke.freak
+queue_signed_by = src_builder_roke
+# email where to send status notifications
+notify_email = srpms_builder@roke.freak
+
+# ------- Src builder config:
+# list of binary builders
+binary_builders = athlon
+# the only src builder
+src_builder = roke-src
-[src]
+[roke-src]
arch = athlon
chroot = /adm/chroot-src
-buildlogs_url = scp://malekith@roke.freak/home/services/ftpd/buildlogs/
-ftp_url = scp://malekith@roke.freak/home/services/ftpd/dists/nest/NEW/ready/SRPMS
+buildlogs_url = rsync://bl:foobar123@roke.freak/home/services/ftpd/buildlogs/nest/SRPMS/.new/
+ftp_url = scp://ftpadm@roke.freak/home/services/ftpd/dists/nest/ready/SRPMS
-[athlon]
+[roke-athlon]
arch = athlon
chroot = /adm/chroot-athlon
-buildlogs_url = scp://malekith@roke.freak/home/services/ftpd/buildlogs/
-ftp_url = scp://malekith@roke.freak/home/services/ftpd/dists/nest/NEW/ready/athlon
+buildlogs_url = rsync://bl:foobar123@roke.freak/home/services/ftpd/buildlogs/nest/athlon/.new/
+ftp_url = scp://ftpadm@roke.freak/home/services/ftpd/dists/nest/ready/athlon
-- sending reports from binary to src builder, so it can update its queue and
- clean srpms that ain't needed anymore
-
- store rpms in /spool/ready/ in chroot? (rpm_builder.py)
- deps install/uninstall machinery (hard stuff -- building deps tree already
done)
-- make mailer.py actually send mail (easy)
-
- fix make-request.sh to be more user-friendly and configurable
- report time consumed by build