# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+from __future__ import print_function
+
import sys
import os
import atexit
import time
import datetime
import string
-import urllib
-import urllib2
+import urllib.request
+import urllib.parse
+import urllib.error
from config import config, init_conf
from bqueue import B_Queue
import lock
import util
+import shutil
import loop
import path
import status
def pick_request(q):
def mycmp(r1, r2):
if r1.kind != 'group' or r2.kind != 'group':
- raise Exception, "non-group requests"
- pri_diff = cmp(r1.priority, r2.priority)
+ raise Exception("non-group requests")
+ pri_diff = util.cmp(r1.priority, r2.priority)
if pri_diff == 0:
- return cmp(r1.time, r2.time)
+ return util.cmp(r1.time, r2.time)
else:
return pri_diff
- q.requests.sort(mycmp)
+ q.requests.sort(key=util.cmp_to_key(mycmp))
ret = q.requests[0]
return ret
while not good:
try:
headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' }
- req = urllib2.Request(url=src_url, headers=headers)
- f = urllib2.urlopen(req)
+ req = urllib.request.Request(url=src_url, headers=headers)
+ f = urllib.request.urlopen(req)
good = True
- except urllib2.HTTPError, error:
+ except urllib.error.HTTPError as error:
return False
- except urllib2.URLError, error:
+ except urllib.error.URLError as error:
# see errno.h
try:
errno = error.errno
return False
def fetch_src(r, b):
- src_url = config.control_url + "/srpms/" + r.id + "/" + urllib.quote(b.src_rpm)
+ src_url = config.control_url + "/srpms/" + r.id + "/" + urllib.parse.quote(b.src_rpm)
b.log_line("fetching %s" % src_url)
start = time.time()
good = False
while not good:
try:
headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' }
- req = urllib2.Request(url=src_url, headers=headers)
- f = urllib2.urlopen(req)
+ req = urllib.request.Request(url=src_url, headers=headers)
+ f = urllib.request.urlopen(req)
good = True
- except urllib2.HTTPError, error:
+ except urllib.error.HTTPError as error:
# fail in a way where cron job will retry
msg = "unable to fetch url %s, http code: %d" % (src_url, error.code)
b.log_line(msg)
queue_time = time.time() - r.time
# 6 hours
if error.code != 404 or (queue_time >= 0 and queue_time < (6 * 60 * 60)):
- raise IOError, msg
+ raise IOError(msg)
else:
msg = "in queue for more than 6 hours, download failing"
b.log_line(msg)
return False
- except urllib2.URLError, error:
- # see errno.h
- try:
- errno = error.errno
- except AttributeError:
- # python 2.4
- errno = error.reason[0]
+ except urllib.error.URLError as error:
+            errno = 0
+            if error.args and isinstance(error.args[0], IOError):
+                errno = error.args[0].errno
if errno in [-3, 60, 61, 110, 111]:
b.log_line("unable to connect to %s... trying again" % (src_url))
continue
else:
+ try:
+ print("error.errno: %s" % str(error.errno))
+ except Exception as e:
+ print("error.errno: exception %s" % e)
+ try:
+ print("error.reason %s" % str(error.reason))
+ except Exception as e:
+ print("error.reason exception %s" % e)
raise
o = chroot.popen("cat > %s" % b.src_rpm, mode = "w")
try:
- bytes = util.sendfile(f, o)
- except IOError, e:
+ shutil.copyfileobj(f, o)
+ except IOError as e:
b.log_line("error: unable to write to `%s': %s" % (b.src_rpm, e))
raise
+    bytes = float(f.headers.get('content-length', 0))
f.close()
o.close()
t = time.time() - start
return res
b.log_line("started at: %s" % time.asctime())
+
+ b.log_line("killing old processes on a builder")
+ chroot.run("/bin/kill --verbose -9 -1", logfile = b.logfile)
+
+ b.log_line("cleaning up /tmp")
+ chroot.run("rm -rf /tmp/B.*", logfile = b.logfile)
+
fetch_src(r, b)
b.log_line("installing srpm: %s" % b.src_rpm)
res = chroot.run("""
set -ex;
install -d %(topdir)s/{BUILD,RPMS};
+ LC_ALL=en_US.UTF-8 rpm -qp --changelog %(src_rpm)s;
rpm -Uhv --nodeps %(rpmdefs)s %(src_rpm)s;
rm -f %(src_rpm)s;
""" % {
- 'topdir' : b._topdir,
+ 'topdir' : b.get_topdir(),
'rpmdefs' : b.rpmbuild_opts(),
'src_rpm' : b.src_rpm
}, logfile = b.logfile)
res = "FAIL_SRPM_INSTALL"
else:
prepare_env()
- chroot.run("install -m 700 -d %s" % tmpdir)
-
+ chroot.run("set -x; install -m 700 -d %s" % tmpdir, logfile=b.logfile)
b.default_target(config.arch)
# check for build arch before filling BR
cmd = "set -ex; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \
"rpmbuild -bp --short-circuit --nodeps %(rpmdefs)s --define 'prep exit 0' %(topdir)s/%(spec)s" % {
'tmpdir': tmpdir,
'nice' : config.nice,
- 'topdir' : b._topdir,
+ 'topdir' : b.get_topdir(),
'rpmdefs' : b.rpmbuild_opts(),
'spec': b.spec,
}
if r.max_jobs > 0:
max_jobs = max(min(config.max_jobs, r.max_jobs), 1)
cmd = "set -ex; : build-id: %(r_id)s; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \
- "rpmbuild -bb --define '_smp_mflags -j%(max_jobs)d' %(rpmdefs)s %(topdir)s/%(spec)s" % {
+ "unshare -n -c rpmbuild -bb --define '__jobs %(max_jobs)d' --define '_smp_mflags -j%(max_jobs)d' --define '_make_opts -Otarget' --define '_pld_builder 1' %(rpmdefs)s %(topdir)s/%(spec)s" % {
'r_id' : r.id,
'tmpdir': tmpdir,
'nice' : config.nice,
'rpmdefs' : b.rpmbuild_opts(),
- 'topdir' : b._topdir,
+ 'topdir' : b.get_topdir(),
'max_jobs' : max_jobs,
'spec': b.spec,
}
b.log_line("ended at: %s, done in %s" % (time.asctime(), datetime.timedelta(0, end_time - begin_time)))
if res:
res = "FAIL"
- files = util.collect_files(b.logfile, basedir = b._topdir)
+ files = util.collect_files(b.logfile, basedir = b.get_topdir())
if len(files) > 0:
r.chroot_files.extend(files)
else:
chmod -R u+rwX %(topdir)s/BUILD;
rm -rf %(topdir)s/{tmp,BUILD}
""" % {
- 'topdir' : b._topdir,
+ 'topdir' : b.get_topdir(),
}, logfile = b.logfile)
def ll(l):
b.log_line("copy rpm files to cache_dir: %s" % rpm_cache_dir)
chroot.run(
"cp -f %s %s && poldek --mo=nodiff --mkidxz -s %s/" % \
- (string.join(b.files), rpm_cache_dir, rpm_cache_dir),
+ (' '.join(b.files), rpm_cache_dir, rpm_cache_dir),
logfile = b.logfile, user = "root"
)
else:
set -ex;
rm -rf %(topdir)s;
""" % {
- 'topdir' : b._topdir,
+ 'topdir' : b.get_topdir(),
}, logfile = b.logfile)
def uploadinfo(b):
do_exit = 0
try:
f = open("/proc/loadavg")
- if float(string.split(f.readline())[2]) > config.max_load:
+ if float(f.readline().split()[2]) > config.max_load:
do_exit = 1
except:
pass
f.close()
l.close()
else:
+ # be able to avoid locking with very low priority
+ if req.priority > -1000:
+ # don't kill server
+ check_load()
+ # allow only one build in given builder at once
+ if not lock.lock("building-high-priority-rpm-for-%s" % config.builder, non_block = 1):
+ return
+
msg = "HIGH PRIORITY: "
msg += "handling request %s (%d) for %s from %s, priority %s" \
q.lock(0)
q.read()
previouslen=len(q.requests)
- q.requests=filter(otherreqs, q.requests)
+ q.requests=list(filter(otherreqs, q.requests))
if len(q.requests)<previouslen:
q.write()
q.unlock()
def main():
if len(sys.argv) < 2:
- raise Exception, "fatal: need to have builder name as first arg"
+ raise Exception("fatal: need to have builder name as first arg")
return main_for(sys.argv[1])
if __name__ == '__main__':