def rm(file):
os.remove(file)
- #print 'rm: '+file
def mv(src, dst):
- os.rename(src, dst+'/'+src.split('/')[-1])
- #print "mv: %s %s" % (src, dst+'/'+src.split('/')[-1])
+ os.rename(src, dst + '/' + src.split('/')[-1])
def findfiles(dir):
def filterinfos(x):
- if x[-11:]=='.uploadinfo':
+ if x[-11:] == '.uploadinfo':
return True
else:
return False
return filter(filterinfos, os.listdir(dir))
def getcontent(file):
- f=open(file, 'r')
- content=f.read()
+ f = open(file, 'r')
+ content = f.read()
f.close()
- if not content[-5:]=='\nEND\n':
+ if not content[-5:] == '\nEND\n':
return None
else:
return content[:-4]
-ftptree=BaseFtpTree(cval['default_to'])
+ftptree = BaseFtpTree(cval['default_to'])
if not ftpio.lock(cval['default_to']):
sys.exit(0)
-for uploadinfo in findfiles(incoming_dir+'SRPMS'):
- content=getcontent(incoming_dir+'SRPMS/'+uploadinfo)
+for uploadinfo in findfiles(incoming_dir + 'SRPMS'):
+ content = getcontent(incoming_dir + 'SRPMS/' + uploadinfo)
if not content:
continue # Uploading not finished
- pkg=BasePkg(uploadinfo[:-19], content=content)
- srpm=pkg.files['SRPMS'][0]
+ pkg = BasePkg(uploadinfo[:-19], content = content)
+ srpm = pkg.files['SRPMS'][0]
if ftptree.has_key(`pkg`):
- ftpio.log("%s already present in %s; removing newer files" %
- (srpm, ftptree))
- rm(incoming_dir+'SRPMS/'+srpm)
- f=open(default_to+'SRPMS/.metadata/'+srpm+'.info', 'a')
- bid=pkg.build.keys()[0]
- build=pkg.build[bid]
+ ftpio.log("%s already present in %s; removing newer files" % (srpm, ftptree))
+ rm(incoming_dir + 'SRPMS/' + srpm)
+ f = open(default_to + 'SRPMS/.metadata/' + srpm+'.info', 'a')
+ bid = pkg.build.keys()[0]
+ build = pkg.build[bid]
f.write("info:build:%s:requester:%s\ninfo:build:%s:requester_email:%s\n"
% (bid, build.requester, bid, build.requester_email))
f.close()
else:
- mv(incoming_dir+'SRPMS/'+srpm, default_to+'SRPMS/RPMS')
+ mv(incoming_dir + 'SRPMS/' + srpm, default_to + 'SRPMS/RPMS')
- f=open(default_to+'SRPMS/.metadata/'+srpm+'.info', 'w')
+ f = open(default_to + 'SRPMS/.metadata/' + srpm + '.info', 'w')
f.write(content)
f.close()
- rm(incoming_dir+'SRPMS/'+uploadinfo)
+ rm(incoming_dir + 'SRPMS/' + uploadinfo)
def send_noarch_msg(files_differ, reqs_differ, pkg, rpm, arch):
req_email=pkg.build[pkg.lastbid].requester_email
def move_noarch(f, arch, rpm, dstpkg):
if dstpkg.noarch_arch.has_key(rpm):
os.system("rpm -qlp %s | LC_ALL=C sort > %s/files.new" %
- (incoming_dir+arch+'/'+rpm, tmpdir))
+ (incoming_dir + arch + '/' + rpm, tmpdir))
os.system("rpm -qRp %s | LC_ALL=C sort | uniq > %s/reqs.new" %
- (incoming_dir+arch+'/'+rpm, tmpdir))
+ (incoming_dir + arch + '/' + rpm, tmpdir))
- files_differ=False
- reqs_differ=False
+ files_differ = False
+ reqs_differ = False
if os.system("diff -u %s/%s.filelist %s/files.new > %s/files.diff" %
(noarchcachedir, rpm, tmpdir, tmpdir)):
- files_differ=True
+ files_differ = True
if os.system("diff -u %s/%s.reqlist %s/reqs.new > %s/reqs.diff" %
(noarchcachedir, rpm, tmpdir, tmpdir)):
- reqs_differ=True
+ reqs_differ = True
if files_differ or reqs_differ:
send_noarch_msg(files_differ, reqs_differ, dstpkg, rpm, arch)
- rm(incoming_dir+arch+'/'+rpm)
+ rm(incoming_dir + arch + '/' + rpm)
else:
os.system("rpm -qlp %s > %s/%s.filelist" %
- (incoming_dir+arch+'/'+rpm, noarchcachedir, rpm))
+ (incoming_dir + arch + '/' + rpm, noarchcachedir, rpm))
os.system("rpm -qRp %s |sort|uniq > %s/%s.reqlist" %
- (incoming_dir+arch+'/'+rpm, noarchcachedir, rpm))
+ (incoming_dir + arch + '/' + rpm, noarchcachedir, rpm))
f.write("file:noarch:%s\ninfo:noarch_arch:%s:%s\n" % (rpm, rpm, arch))
- mv(incoming_dir+arch+'/'+rpm, default_to+'noarch/RPMS')
+ mv(incoming_dir + arch + '/' + rpm, default_to + 'noarch/RPMS')
for arch in ftp_archs:
for uploadinfo in findfiles(incoming_dir+arch):
- content=getcontent(incoming_dir+arch+'/'+uploadinfo)
+ content = getcontent(incoming_dir + arch + '/' + uploadinfo)
if not content:
+ print "%s not finished uploading" % uploadinfo
continue # Uploading not finished
- srcpkg=BasePkg(uploadinfo[:-19], content=content)
- srpm=srcpkg.files['SRPMS'][0]
+ srcpkg = BasePkg(uploadinfo[:-19], content = content)
+ srpm = srcpkg.files['SRPMS'][0]
if not ftptree.has_key(`srcpkg`):
continue # We require the src.rpm to be present
- dstpkg=BasePkg(`srcpkg`, ftptree)
+ dstpkg = BasePkg(`srcpkg`, ftptree)
if dstpkg.files.has_key(arch):
ftpio.log("files from %s for arch %s already present in %s; removing newer files" % (`srcpkg`, arch, ftptree))
for rpm in srcpkg.files['ARCH']:
try:
- rm(incoming_dir+arch+'/'+rpm)
+ rm(incoming_dir + arch + '/'+rpm)
except OSError, e:
- l = "Removing %s problem: %s" % (incoming_dir+arch+'/'+rpm, e)
+ l = "Removing %s problem: %s" % (incoming_dir + arch + '/' + rpm, e)
ftpio.log(l)
print l
- rm(incoming_dir+arch+'/'+uploadinfo)
+ rm(incoming_dir + arch + '/' + uploadinfo)
continue
- f=open(default_to+'SRPMS/.metadata/'+srpm+'.info', 'a')
+ f = open(default_to + 'SRPMS/.metadata/' + srpm + '.info', 'a')
for rpm in srcpkg.files['ARCH']:
- if rpm[-11:]=='.noarch.rpm' and config.separate_noarch:
+ if rpm[-11:] == '.noarch.rpm' and config.separate_noarch:
move_noarch(f, arch, rpm, dstpkg)
else:
f.write("file:%s:%s\n" % (arch, rpm))
+ srcfile = incoming_dir + arch + '/' + rpm
+
+ dstfile = default_to + arch + '/RPMS'
+
try:
- mv(incoming_dir+arch+'/'+rpm, default_to+arch+'/RPMS')
+ mv(srcfile, dstfile)
except OSError, e:
- l = "Moving %s to %s problem: %s" % (incoming_dir+arch+'/'+rpm, default_to+arch+'/RPMS', e)
+ l = "Moving %s to %s problem: %s" % (srcfile, dstfile, e)
ftpio.log(l)
print l
f.close()
- rm(incoming_dir+arch+'/'+uploadinfo)
+ rm(incoming_dir + arch + '/' + uploadinfo)
ftpio.unlock(cval['default_to'])
-
try:
opts, args = getopt.getopt(sys.argv[1:], None, ["nopoldek", "noyum", "norpmrepo", "poldek", "yum", "rpmrepo"])
except getopt.GetoptError:
- print "ERR: not enough parameters given"
- print "gen-indexes.py [--[no]poldek] [--[no]yum] [--[no]rpmrepo] tree [tree2...]"
+ print >>sys.stderr, "ERR: not enough parameters given"
+ print >>sys.stderr, "gen-indexes.py [--[no]poldek] [--[no]yum] [--[no]rpmrepo] tree [tree2...]"
sys.exit(1)
do_poldek = True
do_rpmrepo = True
if not do_poldek and not do_yum and not do_rpmrepo:
- print "ERR: speciy at least one action"
+ print >>sys.stderr, "ERR: specify at least one action"
sys.exit(1)
-trees=args
+trees = args
for tree in trees:
checkdir(tree)
ftpio.connect('gen-indexes')
-locked=[]
+locked = []
for tree in trees:
if ftpio.lock(tree, True):
locked.append(tree)
else:
- print "ERR: %s tree already locked" % tree
+ print >>sys.stderr, "ERR: %s tree already locked" % tree
for i in locked:
ftpio.unlock(i)
sys.exit(1)
-home=os.environ['HOME']
+home = os.environ['HOME']
os.umask(022)
os.nice(19)
if do_poldek:
+ poldek = '%s.stat/bin/poldek-new --cachedir=%s/tmp/poldek --conf %s.stat/etc/poldek.conf --mkidxz' % (ftp_dir, home, ftp_dir)
+
for tree in trees:
print '-------------------------- %s --------------------------' % tree
for arch in all_ftp_archs:
print 'generate poldek index for %s' % arch
- if config.old_poldek:
- os.system('%s.stat/bin/poldek --cachedir=%s/tmp/poldek -c %s.stat/etc/poldek.conf -s %s%s/%s/RPMS/ --mkidxz' %
- (ftp_dir,home,ftp_dir,ftp_dir,tree,arch))
- else:
- if config.poldek_indexes != "old":
- os.system('%s.stat/bin/poldek-new --cachedir=%s/tmp/poldek --conf %s.stat/etc/poldek.conf -s %s%s/%s/RPMS/ --mkidxz --mkidx-type pndir' %
- (ftp_dir,home,ftp_dir,ftp_dir,tree,arch))
- if config.poldek_indexes != "new":
- os.system('%s.stat/bin/poldek-new --cachedir=%s/tmp/poldek --conf %s.stat/etc/poldek.conf -s %s%s/%s/RPMS/ --mkidxz --mkidx-type pdir' %
- (ftp_dir,home,ftp_dir,ftp_dir,tree,arch))
+ if config.poldek_indexes != "old":
+ os.system('%s -s %s%s/%s/RPMS/ --mkidxz --mkidx-type pndir' % (poldek, ftp_dir, tree, arch))
+ if config.poldek_indexes != "new":
+ os.system('%s -s %s%s/%s/RPMS/ --mkidxz --mkidx-type pdir' % (poldek, ftp_dir, tree, arch))
if do_yum:
os.system('set -x; cd %s.stat/repodata && cvs up comps.xml' % ftp_dir)
+ yum = '%s.stat/bin/createrepo -d -g %s.stat/repodata/comps.xml' % (ftp_dir, ftp_dir)
for tree in trees:
print '-------------------------- %s --------------------------' % tree
+ cachedir = '%s/tmp/createrepo/%s' % (home, tree)
for arch in all_ftp_archs:
print 'generate repodata for %s using createrepo' % arch
# Creating indexes for yum and other supporting xml repodata.
- os.system('time %s.stat/bin/createrepo -d -g %s.stat/repodata/comps.xml --cache %s/tmp/createrepo/%s-%s %s%s/%s/RPMS' %
- (ftp_dir,ftp_dir,home,tree,arch,ftp_dir,tree,arch))
+ os.system('time %s --cache %s-%s %s%s/%s/RPMS' % (yum, cachedir, arch, ftp_dir, tree, arch))
if do_rpmrepo:
os.system('set -x; cd %s.stat/repodata && cvs up comps.xml' % ftp_dir)
try:
tree=ftptree.FtpTree(sys.argv[1])
+ #tree.do_checkbuild=False
tree.mark4removal(sys.argv[2:])
tree.removepkgs()
except ftptree.SomeError:
sys.exit(1)
try:
- srctree=ftptree.FtpTree(sys.argv[1], loadall=True)
- dsttree=ftptree.FtpTree(sys.argv[2])
+ srctree = ftptree.FtpTree(sys.argv[1], loadall = True)
+ dsttree = ftptree.FtpTree(sys.argv[2])
srctree.mark4moving(sys.argv[3:])
except ftptree.SomeError:
# In case of problems we need to unlock the trees before exiting
ftpio.unlock(sys.argv[1])
ftpio.unlock(sys.argv[2])
-
import os, sys, config
def fileexists(path):
- if path[0]=='/':
- fullpath=path
+ if path[0] == '/':
+ fullpath = path
else:
- fullpath=config.ftp_dir+path
+ fullpath = config.ftp_dir + path
return os.path.exists(fullpath)
def checkdir(dir):
if not fileexists(dir):
- print 'ERR: ' + config.value['ftp_dir']+'/' + dir + " does not exist"
+ print >>sys.stderr, 'ERR: ' + config.value['ftp_dir']+'/' + dir + " does not exist"
sys.exit(1)
if 'HOME' in os.environ:
- ftpadmdir=os.environ['HOME']+'/pld-ftp-admin/'
+ ftpadmdir = os.environ['HOME'] + '/pld-ftp-admin/'
else:
- ftpadmdir='../'
-noarchcachedir=ftpadmdir+'var/noarch-cache/'
-tmpdir=ftpadmdir+'var/tmp/'
+ ftpadmdir = '../'
+# noarchcachedir is dir where noarch files contents are stored for AI
+# XXX: file reference where the AI resides
+noarchcachedir = ftpadmdir + 'var/noarch-cache/'
+
+tmpdir = ftpadmdir + 'var/tmp/'
import string, os
-value={}
+value = {}
if os.environ.has_key('HOME'):
- path=os.environ['HOME']
+ path = os.environ['HOME']
else:
- path='../../' # cgi-bin interface
+ path = '../../' # cgi-bin interface
-f=open(path+'/.ftpadmrc', 'r')
+f = open(path + '/.ftpadmrc', 'r')
for line in f.readlines():
if line[0] == '#' or string.find(line, '=') == -1:
continue
- tuple=string.split(string.strip(line), '=')
+ tuple = string.split(string.strip(line), '=')
if tuple[1][0] == '"':
- value[string.strip(tuple[0])]=tuple[1][1:-1]
+ value[string.strip(tuple[0])] = tuple[1][1:-1]
else:
- value[string.strip(tuple[0])]=string.strip(tuple[1])
+ value[string.strip(tuple[0])] = string.strip(tuple[1])
f.close()
-default_to=value['ftp_dir']+'/'+value['default_to']+'/'
-ftp_dir=value['ftp_dir']+'/'
-incoming_dir=value['ftp_dir']+'/'+value['incoming_dir']+'/'
-test_builds_dir=value['ftp_dir']+'/'+value['test_builds_dir']+'/'
-ftp_archs=value['ftp_archs'].split(' ')
+default_to = value['ftp_dir'] + '/' + value['default_to'] + '/'
+ftp_dir = value['ftp_dir'] + '/'
+incoming_dir = value['ftp_dir'] + '/' + value['incoming_dir'] + '/'
+test_builds_dir = value['ftp_dir'] + '/' + value['test_builds_dir'] + '/'
+ftp_archs = value['ftp_archs'].split(' ')
-builderqueue=value['builderqueue']
+builderqueue = value['builderqueue']
-if 'old_poldek' in value and value['old_poldek']=='yes':
- old_poldek=True
+if 'old_poldek' in value and value['old_poldek'] == 'yes':
+ old_poldek = True
else:
- old_poldek=False
+ old_poldek = False
if 'poldek_indexes' in value:
- poldek_indexes=value['poldek_indexes']
+ poldek_indexes = value['poldek_indexes']
else:
- poldek_indexes='old'
+ poldek_indexes = 'old'
-if value['separate_noarch']=='yes':
- separate_noarch=True
+if value['separate_noarch'] == 'yes':
+ separate_noarch = True
else:
- separate_noarch=False
+ separate_noarch = False
if separate_noarch:
- all_ftp_archs=['noarch'] + ftp_archs
+ all_ftp_archs = ['noarch'] + ftp_archs
else:
- all_ftp_archs=ftp_archs
-
+ all_ftp_archs = ftp_archs
os.remove(file)
except OSError, e:
pinfo("os.remove(%s): %s" % (file, e))
- raise
+ #raise
def mv(src, dst, test=False):
fsrc = src
class Pkg(BasePkg):
def __init__(self, nvr, tree):
BasePkg.__init__(self, nvr, tree)
- self.name=string.join(nvr.split('-')[:-2], '-')
- self.version=nvr.split('-')[-2]
- self.release=nvr.split('-')[-1]
- self.marked4removal=False
- self.marked4moving=False
- self.marked4movingpool=[]
- self.errors=[]
- self.warnings=[]
+ self.name = string.join(nvr.split('-')[:-2], '-')
+ self.version = nvr.split('-')[-2]
+ self.release = nvr.split('-')[-1]
+ self.marked4removal = False
+ self.marked4moving = False
+ self.marked4movingpool = []
+ self.errors = []
+ self.warnings = []
def __cmp__(self, pkg):
if self.name > pkg.name:
self.mark4moving()
def writeinfo(self):
- f=open(self.tree.basedir+'/SRPMS/.metadata/'+self.nvr+'.src.rpm.info', 'w')
+ f = open(self.tree.basedir+'/SRPMS/.metadata/'+self.nvr+'.src.rpm.info', 'w')
for bid in self.build.keys():
f.write("info:build:%s:requester:%s\ninfo:build:%s:requester_email:%s\n" % (bid, self.build[bid].requester, bid, self.build[bid].requester_email))
for key in self.info.keys():
for rpm in self.files[arch]:
f.write("file:%s:%s\n" % (arch, rpm))
- def remove(self, test=False):
+ def remove(self, test = False):
+ """
+ Remove package from ftp
+ """
for arch in self.files.keys():
for rpm in self.files[arch]:
- rm(self.tree.basedir+'/'+arch+'/RPMS/'+rpm, test)
- if arch=='noarch':
- if fileexists(noarchcachedir+rpm+'.filelist'):
- rm(noarchcachedir+rpm+'.filelist', test)
- if fileexists(noarchcachedir+rpm+'.reqlist'):
- rm(noarchcachedir+rpm+'.reqlist', test)
- rm(self.tree.basedir+'/SRPMS/.metadata/'+self.nvr+'.src.rpm.info', test)
+ rm(self.tree.basedir + '/' + arch + '/RPMS/' + rpm, test)
+ if arch == 'noarch':
+ if fileexists(noarchcachedir + rpm + '.filelist'):
+ rm(noarchcachedir + rpm + '.filelist', test)
+ if fileexists(noarchcachedir + rpm + '.reqlist'):
+ rm(noarchcachedir + rpm + '.reqlist', test)
+ rm(self.tree.basedir + '/SRPMS/.metadata/' + self.nvr + '.src.rpm.info', test)
def move(self, dsttree, test=False):
if dsttree.has_key(self.nvr):
- movedany=False
+ movedany = False
for arch in self.files.keys():
if arch in dsttree[self.nvr].files.keys():
msg = ""
msg = "TEST "
pinfo("%sArch %s for %s is already present in dest tree; removing from srctree" % (msg, arch, self.nvr))
for rpm in self.files[arch]:
- rm(self.tree.basedir+'/'+arch+'/RPMS/'+rpm, test)
+ rm(self.tree.basedir + '/' + arch + '/RPMS/' + rpm, test)
else:
- movedany=True
- dsttree[self.nvr].files[arch]=self.files[arch]
+ movedany = True
+ dsttree[self.nvr].files[arch] = self.files[arch]
for rpm in self.files[arch]:
- mv(self.tree.basedir+'/'+arch+'/RPMS/'+rpm, dsttree.basedir+'/'+arch+'/RPMS/', test)
+ mv(self.tree.basedir + '/' + arch + '/RPMS/' + rpm, dsttree.basedir + '/' + arch + '/RPMS/', test)
if not test and movedany:
for bid in self.build.keys():
- dsttree[self.nvr].build[bid]=self.build[bid]
+ dsttree[self.nvr].build[bid] = self.build[bid]
dsttree[self.nvr].writeinfo()
- rm(self.tree.basedir+'/SRPMS/.metadata/'+self.nvr+'.src.rpm.info', test)
+ rm(self.tree.basedir + '/SRPMS/.metadata/' + self.nvr + '.src.rpm.info', test)
else:
+ # move files
for arch in self.files.keys():
for rpm in self.files[arch]:
- mv(self.tree.basedir+'/'+arch+'/RPMS/'+rpm, dsttree.basedir+'/'+arch+'/RPMS/', test)
- mv(self.tree.basedir+'/SRPMS/.metadata/'+self.nvr+'.src.rpm.info', dsttree.basedir+'/SRPMS/.metadata/', test)
+ mv(self.tree.basedir + '/' + arch + '/RPMS/' + rpm, dsttree.basedir + '/' + arch + '/RPMS/', test)
+ # move metadata
+ mv(self.tree.basedir + '/SRPMS/.metadata/' + self.nvr + '.src.rpm.info', dsttree.basedir + '/SRPMS/.metadata/', test)
class FtpTree(BaseFtpTree):
def __init__(self, tree, loadall=False):
BaseFtpTree.__init__(self, tree)
- self.loadedpkgs={}
- self.marked4removal=[]
- self.marked4moving=[]
- self.pkgnames=[]
+ self.loadedpkgs = {}
+ self.marked4removal = []
+ self.marked4moving = []
+ self.pkgnames = []
self.__loadpkgnames()
if loadall:
for pkgname in self.pkgnames:
- self.loadedpkgs[pkgname]=Pkg(pkgname, self)
+ self.loadedpkgs[pkgname] = Pkg(pkgname, self)
# Tests:
- self.do_checkbuild=True
+ self.do_checkbuild = True
def __getitem__(self, key):
if self.loadedpkgs.has_key(key):
self.__checkbuild(self.marked4moving)
self.__checkarchs(dsttree, self.marked4moving)
- self.__rmolderfromsrc(test=True)
- self.__rmotherfromdst(dsttree, test=True)
+ self.__rmolderfromsrc(test = True)
+ self.__rmotherfromdst(dsttree, test = True)
for pkg in self.marked4moving:
- pkg.move(dsttree, test=True)
+ pkg.move(dsttree, test = True)
def movepkgs(self, dsttree):
if self.do_checkbuild:
return True
else:
return False
- list=filter(checkfiletype, os.listdir(self.basedir+'/SRPMS/.metadata'))
- self.pkgnames=map((lambda x: x[:-13]), list)
+ list = filter(checkfiletype, os.listdir(self.basedir+'/SRPMS/.metadata'))
+ self.pkgnames = map((lambda x: x[:-13]), list)
def __mark4something(self, wannabepkgs, markfunction):
def chopoffextension(pkg):
bailoutonerror()
def __checkbuild(self, marked):
- f=urllib.urlopen(config.builderqueue)
- #f=open('queue.txt')
- requests={}
- reid=re.compile(r'^.*id=(.*) pri.*$')
- regb=re.compile(r'^group:.*$|builders:.*$', re.M)
+ """
+ Checks queue file if all arches are built
+
+ Reads config.builderqueue to grab the info
+ """
+ f = urllib.urlopen(config.builderqueue)
+ requests = {}
+ reid = re.compile(r'^.*id=(.*) pri.*$')
+ regb = re.compile(r'^group:.*$|builders:.*$', re.M)
for i in re.findall(regb, f.read()):
- if i[0]=='g':
- id=reid.sub(r'\1', i)
- requests[id]=""
+ if i[0] == 'g':
+ id = reid.sub(r'\1', i)
+ requests[id] = ""
elif i[0]=='b':
- requests[id]=requests[id]+i
+ requests[id] = requests[id] + i
f.close()
+
for pkg in marked:
for bid in pkg.build.keys():
if requests.has_key(bid) and not requests[bid].find('?') == -1:
pkg.error("(buildid %s) building not finished" % bid)
def __checkarchs(self, dsttree, marked):
+ """
+ Check that each marked package is built on all archs.
+ """
for pkg in marked:
if len(pkg.files.keys()) <= 1:
pkg.error('has only src.rpm built')
continue
- otherpkgnames=self.__find_other_pkgs(pkg, dsttree)
- if otherpkgnames: # check if we're not removing some archs
- curarchs=[]
- missingarchs=[]
+ otherpkgnames = self.__find_other_pkgs(pkg, dsttree)
+
+ # check if we're not removing some archs
+ if otherpkgnames:
+ curarchs = []
+ missingarchs = []
for somepkg in otherpkgnames:
curarchs.extend(Pkg(somepkg, dsttree).files.keys())
for arch in curarchs:
missingarchs.append(arch)
if missingarchs:
pkg.error('moving would remove archs: %s' % missingarchs)
- else: # warn if a package isn't built for all archs
- if (config.separate_noarch and 'noarch' in pkg.files.keys() and
- len(pkg.files.keys())==2):
+ else:
+ # warn if a package isn't built for all archs
+ if (config.separate_noarch and 'noarch' in pkg.files.keys() and len(pkg.files.keys()) == 2):
continue
- elif len(pkg.files.keys()) != len(config.ftp_archs)+1:
- missingarchs=[]
+ elif len(pkg.files.keys()) != len(config.ftp_archs) + 1:
+ missingarchs = []
for arch in config.ftp_archs:
if arch not in pkg.files.keys():
missingarchs.append(arch)
def __rmolderfromsrc(self, test=False):
for pkg in self.marked4moving:
- olderpkgnames=self.__find_older_pkgs(pkg)
+ olderpkgnames = self.__find_older_pkgs(pkg)
for i in olderpkgnames:
Pkg(i, self).remove(test)
- def __rmotherfromdst(self, dsttree, test=False):
+ def __rmotherfromdst(self, dsttree, test = False):
for pkg in self.marked4moving:
- pkgnames=self.__find_other_pkgs(pkg, dsttree)
+ pkgnames = self.__find_other_pkgs(pkg, dsttree)
for i in pkgnames:
Pkg(i, dsttree).remove(test)
# Used more than once filter functions
-
def __find_other_pkgs(self, pkg, tree):
- escapedpkgname=pkg.name.replace('.', '\.').replace('+', '\+')
- ziewre=re.compile(escapedpkgname+'-[^-]*-[^-]*$')
+ escapedpkgname = pkg.name.replace('.', '\.').replace('+', '\+')
+ ziewre = re.compile(escapedpkgname + '-[^-]*-[^-]*$')
def filter_other_pkgs(x):
if ziewre.match(x) and not x == pkg.nvr:
return True
def __find_older_pkgs(self, pkg):
def filter_older_pkgs(x):
- c=x.split('-')
+ c = x.split('-')
rc = rpm.labelCompare(('0', pkg.version, pkg.release),
('0', c[-2], c[-1]))
if rc == 1: # pkg > x