# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+from __future__ import print_function
+
import sys
import os
import atexit
import time
import datetime
import string
-import urllib
-import urllib2
+import urllib.request
+import urllib.parse
+import urllib.error
from config import config, init_conf
from bqueue import B_Queue
import lock
import util
+import shutil
import loop
import path
import status
+# Pick the next request to serve from queue q: 'group' requests are
+# ordered by priority (lower value first), ties broken by submission
+# time (older first); the head of the sorted list is returned.
def pick_request(q):
+    # py2-style three-way comparator; py3 removed both the builtin cmp()
+    # and the cmp= argument of list.sort(), hence the util.cmp shim and
+    # the cmp_to_key adapter below.
    def mycmp(r1, r2):
        if r1.kind != 'group' or r2.kind != 'group':
-            raise Exception, "non-group requests"
-        pri_diff = cmp(r1.priority, r2.priority)
+            raise Exception("non-group requests")
+        pri_diff = util.cmp(r1.priority, r2.priority)
        if pri_diff == 0:
-            return cmp(r1.time, r2.time)
+            return util.cmp(r1.time, r2.time)
        else:
            return pri_diff
-    q.requests.sort(mycmp)
+    q.requests.sort(key=util.cmp_to_key(mycmp))
    ret = q.requests[0]
    return ret
while not good:
try:
headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' }
- req = urllib2.Request(url=src_url, headers=headers)
- f = urllib2.urlopen(req)
+ req = urllib.request.Request(url=src_url, headers=headers)
+ f = urllib.request.urlopen(req)
good = True
- except urllib2.HTTPError, error:
+ except urllib.error.HTTPError as error:
return False
- except urllib2.URLError, error:
+ except urllib.error.URLError as error:
# see errno.h
try:
errno = error.errno
return False
+# Fetch the src.rpm for request r into builder b's chroot from the
+# control_url.  Transient network errors are retried in a loop; an HTTP
+# error raises IOError (so the cron job retries) unless it is a 404 on a
+# request queued for over 6 hours, in which case False is returned.  On
+# success the transfer size/speed is logged.
def fetch_src(r, b):
-    src_url = config.control_url + "/srpms/" + r.id + "/" + urllib.quote(b.src_rpm)
+    src_url = config.control_url + "/srpms/" + r.id + "/" + urllib.parse.quote(b.src_rpm)
    b.log_line("fetching %s" % src_url)
    start = time.time()
    good = False
    while not good:
        try:
            headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' }
-            req = urllib2.Request(url=src_url, headers=headers)
-            f = urllib2.urlopen(req)
+            req = urllib.request.Request(url=src_url, headers=headers)
+            f = urllib.request.urlopen(req)
            good = True
-        except urllib2.HTTPError, error:
+        except urllib.error.HTTPError as error:
            # fail in a way where cron job will retry
            msg = "unable to fetch url %s, http code: %d" % (src_url, error.code)
            b.log_line(msg)
            queue_time = time.time() - r.time
            # 6 hours
            if error.code != 404 or (queue_time >= 0 and queue_time < (6 * 60 * 60)):
-                raise IOError, msg
+                raise IOError(msg)
            else:
                msg = "in queue for more than 6 hours, download failing"
                b.log_line(msg)
                return False
-        except urllib2.URLError, error:
-            # see errno.h
-            try:
-                errno = error.errno
-            except AttributeError:
-                # python 2.4
-                errno = error.reason[0]
+        except urllib.error.URLError as error:
+            # py3: the underlying IOError (if any) is carried in args[0]
+            errno = 0
+            if isinstance(error.args[0], IOError):
+                errno = error.args[0].errno
+            # retry whitelist (see errno.h: DNS try-again, connection
+            # refused / timed out on Linux and BSD)
            if errno in [-3, 60, 61, 110, 111]:
                b.log_line("unable to connect to %s... trying again" % (src_url))
                continue
            else:
+                # dump whatever diagnostic attributes exist, then re-raise
+                try:
+                    print("error.errno: %s" % str(error.errno))
+                except Exception as e:
+                    print("error.errno: exception %s" % e)
+                try:
+                    print("error.reason %s" % str(error.reason))
+                except Exception as e:
+                    print("error.reason exception %s" % e)
                raise
    o = chroot.popen("cat > %s" % b.src_rpm, mode = "w")
    try:
-        bytes = util.sendfile(f, o)
-    except IOError, e:
+        shutil.copyfileobj(f, o)
+    except IOError as e:
        b.log_line("error: unable to write to `%s': %s" % (b.src_rpm, e))
        raise
+    # copyfileobj does not report a byte count; take it from the
+    # Content-Length response header for the statistics below
+    bytes = float(f.headers['content-length'])
    f.close()
    o.close()
    t = time.time() - start
    else:
        b.log_line("fetched %d bytes, %.1f K/s" % (bytes, bytes / 1024.0 / t))
+# Prepare the chroot before a build: mount /proc if missing, recreate
+# /dev/full, and revoke the builder account's read access to resolv.conf
+# to limit its network access.  Runs as root; output goes to logfile
+# when given (new optional parameter, defaults keep old behavior).
-def prepare_env():
+def prepare_env(logfile = None):
    chroot.run("""
        test ! -f /proc/uptime && mount /proc 2>/dev/null
        test ! -c /dev/full && rm -f /dev/full && mknod -m 666 /dev/full c 1 7
        # try to limit network access for builder account
        /bin/setfacl -m u:builder:--- /etc/resolv.conf
-    """, 'root')
+    """, 'root', logfile = logfile)
+# Build the package for request r inside builder b's chroot: fetch and
+# install the src.rpm, run an rpmbuild -bp arch check, then the full
+# rpmbuild -bb, collect files referenced by the build log, and clean up
+# the topdir.  Returns a status string on error ("FAIL_INTERNAL",
+# "FAIL_SRPM_INSTALL", "FAIL", "FAIL_<section>").
def build_rpm(r, b):
-    if len(b.spec) <= 5:
+    packagename = b.get_package_name()
+    if not packagename:
        # should not really get here
        b.log_line("error: No .spec not given of malformed: '%s'" % b.spec)
        res = "FAIL_INTERNAL"
        return res
-    packagename = b.spec[:-5]
    status.push("building %s (%s)" % (b.spec, packagename))
    b.log_line("request from: %s" % r.requester)
        return res
    b.log_line("started at: %s" % time.asctime())
+
+    b.log_line("killing old processes on a builder")
+    chroot.run("/bin/kill --verbose -9 -1", logfile = b.logfile)
+
+    b.log_line("cleaning up /tmp")
+    chroot.run("rm -rf /tmp/B.*", logfile = b.logfile)
+
    fetch_src(r, b)
    b.log_line("installing srpm: %s" % b.src_rpm)
+    # NOTE(review): all rpm paths now derive from b.get_topdir() instead
+    # of the old hard-coded rpm/packages/<name> layout
    res = chroot.run("""
-        # b.id %(bid)s
        set -ex;
-        install -d rpm/packages/%(package)s rpm/BUILD/%(package)s;
-        rpm -Uhv %(rpmdefs)s %(src_rpm)s;
+        install -d %(topdir)s/{BUILD,RPMS};
+        LC_ALL=en_US.UTF-8 rpm -qp --changelog %(src_rpm)s;
+        rpm -Uhv --nodeps %(rpmdefs)s %(src_rpm)s;
        rm -f %(src_rpm)s;
    """ % {
-        'bid' : b.b_id,
-        'package' : packagename,
+        'topdir' : b.get_topdir(),
        'rpmdefs' : b.rpmbuild_opts(),
        'src_rpm' : b.src_rpm
    }, logfile = b.logfile)
    b.files = []
-    # it's better to have TMPDIR and BUILD dir on same partition:
-    # + /usr/bin/bzip2 -dc /home/services/builder/rpm/packages/kernel/patch-2.6.27.61.bz2
-    # patch: **** Can't rename file /tmp/B.a1b1d3/poKWwRlp to drivers/scsi/hosts.c : No such file or directory
-    tmpdir = os.environ.get('HOME') + "/rpm/BUILD/%s.%s/tmp" % packagename, b.b_id[0:6]
+    tmpdir = b.tmpdir()
    if res:
        b.log_line("error: installing src rpm failed")
        res = "FAIL_SRPM_INSTALL"
    else:
        prepare_env()
-        chroot.run("install -m 700 -d %s" % tmpdir)
-
+        chroot.run("set -x; install -m 700 -d %s" % tmpdir, logfile=b.logfile)
        b.default_target(config.arch)
        # check for build arch before filling BR
        cmd = "set -ex; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \
-            "rpmbuild -bp --short-circuit --nodeps %(rpmdefs)s --define 'prep exit 0' rpm/packages/%(package)s/%(spec)s" % {
+            "rpmbuild -bp --short-circuit --nodeps %(rpmdefs)s --define 'prep exit 0' %(topdir)s/%(spec)s" % {
            'tmpdir': tmpdir,
            'nice' : config.nice,
+            'topdir' : b.get_topdir(),
            'rpmdefs' : b.rpmbuild_opts(),
-            'package' : packagename,
            'spec': b.spec,
        }
        res = chroot.run(cmd, logfile = b.logfile)
        if r.max_jobs > 0:
            max_jobs = max(min(config.max_jobs, r.max_jobs), 1)
        cmd = "set -ex; : build-id: %(r_id)s; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \
-            "rpmbuild -bb --define '_smp_mflags -j%(max_jobs)d' %(rpmdefs)s rpm/packages/%(package)s/%(spec)s" % {
+            "unshare -n -c rpmbuild -bb --define '__jobs %(max_jobs)d' --define '_smp_mflags -j%(max_jobs)d' --define '_make_opts -Otarget' --define '_pld_builder 1' %(rpmdefs)s %(topdir)s/%(spec)s" % {
            'r_id' : r.id,
            'tmpdir': tmpdir,
            'nice' : config.nice,
            'rpmdefs' : b.rpmbuild_opts(),
-            'package' : packagename,
+            'topdir' : b.get_topdir(),
            'max_jobs' : max_jobs,
            'spec': b.spec,
        }
    b.log_line("ended at: %s, done in %s" % (time.asctime(), datetime.timedelta(0, end_time - begin_time)))
    if res:
        res = "FAIL"
-        files = util.collect_files(b.logfile)
+        files = util.collect_files(b.logfile, basedir = b.get_topdir())
        if len(files) > 0:
            r.chroot_files.extend(files)
        else:
            res = "FAIL_%s" % last_section.upper()
        b.files = files
+    # cleanup tmp and build files
    chroot.run("""
        set -ex;
-        rpmbuild %(rpmdefs)s --nodeps --nobuild --clean --rmspec --rmsource rpm/packages/%(package)s/%(spec)s
-        rm -rf %(tmpdir)s;
-        chmod -R u+rwX rpm/BUILD/%(package)s;
-        rm -rf rpm/BUILD/%(package)s;
-    """ %
-        {'tmpdir' : tmpdir, 'spec': b.spec, 'package' : packagename, 'rpmdefs' : b.rpmbuild_opts()}, logfile = b.logfile)
+        chmod -R u+rwX %(topdir)s/BUILD;
+        rm -rf %(topdir)s/{tmp,BUILD}
+    """ % {
+        'topdir' : b.get_topdir(),
+    }, logfile = b.logfile)
+    # local helper: append one line l to this build's logfile
    def ll(l):
        util.append_to(b.logfile, l)
b.log_line("copy rpm files to cache_dir: %s" % rpm_cache_dir)
chroot.run(
"cp -f %s %s && poldek --mo=nodiff --mkidxz -s %s/" % \
- (string.join(b.files), rpm_cache_dir, rpm_cache_dir),
+ (' '.join(b.files), rpm_cache_dir, rpm_cache_dir),
logfile = b.logfile, user = "root"
)
else:
chroot.cp(f, outfile = local, rm = True)
ftp.add(local)
+ # cleanup all remains from this build
+ chroot.run("""
+ set -ex;
+ rm -rf %(topdir)s;
+ """ % {
+ 'topdir' : b.get_topdir(),
+ }, logfile = b.logfile)
+
def uploadinfo(b):
c="file:SRPMS:%s\n" % b.src_rpm
for f in b.files:
do_exit = 0
try:
f = open("/proc/loadavg")
- if float(string.split(f.readline())[2]) > config.max_load:
+ if float(f.readline().split()[2]) > config.max_load:
do_exit = 1
except:
pass
f.close()
l.close()
else:
+ # be able to avoid locking with very low priority
+ if req.priority > -1000:
+ # don't kill server
+ check_load()
+ # allow only one build in given builder at once
+ if not lock.lock("building-high-priority-rpm-for-%s" % config.builder, non_block = 1):
+ return
+
msg = "HIGH PRIORITY: "
msg += "handling request %s (%d) for %s from %s, priority %s" \
q.lock(0)
q.read()
previouslen=len(q.requests)
- q.requests=filter(otherreqs, q.requests)
+ q.requests=list(filter(otherreqs, q.requests))
if len(q.requests)<previouslen:
q.write()
q.unlock()
+# Entry point: the builder name must be supplied as the first CLI
+# argument; delegates to main_for().
def main():
    if len(sys.argv) < 2:
-        raise Exception, "fatal: need to have builder name as first arg"
+        raise Exception("fatal: need to have builder name as first arg")
    return main_for(sys.argv[1])
if __name__ == '__main__':