summaryrefslogtreecommitdiffstats
path: root/pym/dbapi/ebuild.py
diff options
context:
space:
mode:
Diffstat (limited to 'pym/dbapi/ebuild.py')
-rw-r--r--pym/dbapi/ebuild.py667
1 files changed, 667 insertions, 0 deletions
diff --git a/pym/dbapi/ebuild.py b/pym/dbapi/ebuild.py
new file mode 100644
index 000000000..d96d6a120
--- /dev/null
+++ b/pym/dbapi/ebuild.py
@@ -0,0 +1,667 @@
+from dbapi import dbapi
+from portage import settings, config, auxdbkeys, auxdbkeylen, doebuild, eapi_is_supported, flatten, listdir
+from portage_data import portage_gid
+from portage_util import ensure_dirs, apply_recursive_permissions, writemsg
+from portage_versions import pkgsplit, catpksplit, best
+from portage_dep import use_reduce, paren_reduce, dep_expand, dep_getkey, dep_getslot, match_from_list, match_to_list
+from portage_manifest import Manifest
+from output import red
+
+import eclass_cache, portage_exception, portage_gpg, portage_locks, portage_checksum
+import os, sys
+
+def close_portdbapi_caches():
+	"""Sync and close the auxdb caches of every live portdbapi instance."""
+	for i in portdbapi.portdbapi_instances:
+		i.close_caches()
+
+
+class portdbapi(dbapi):
+	"""this tree will scan a portage directory located at root (passed to init)"""
+	# Every constructed instance is recorded here so that
+	# close_portdbapi_caches() can flush them all at interpreter shutdown.
+	portdbapi_instances = []
+
+	def __init__(self,porttree_root,mysettings=None):
+		"""Initialize the tree rooted at porttree_root.
+
+		porttree_root -- path to PORTDIR (realpath is taken below)
+		mysettings   -- optional config instance; when omitted, a clone of the
+		                global settings object is used
+		"""
+		portdbapi.portdbapi_instances.append(self)
+
+		if mysettings:
+			self.mysettings = mysettings
+		else:
+			global settings
+			self.mysettings = config(clone=settings)
+
+		# This is strictly for use in aux_get() doebuild calls when metadata
+		# is generated by the depend phase.  It's safest to use a clone for
+		# this purpose because doebuild makes many changes to the config
+		# instance that is passed in.
+		self.doebuild_settings = config(clone=self.mysettings)
+
+		# GPG Manifest verification state (configured below from FEATURES).
+		self.manifestVerifyLevel = None
+		self.manifestVerifier = None
+		self.manifestCache = {}    # {location: [stat, md5]}
+		self.manifestMissingCache = []
+
+		if "gpg" in self.mysettings.features:
+			# Verification strictness escalates with FEATURES:
+			# gpg -> EXISTS, gpg+strict -> MARGINAL, gpg+severe -> TRUSTED
+			# (severe additionally requires a signed keyring).
+			self.manifestVerifyLevel = portage_gpg.EXISTS
+			if "strict" in self.mysettings.features:
+				self.manifestVerifyLevel = portage_gpg.MARGINAL
+				self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+			elif "severe" in self.mysettings.features:
+				self.manifestVerifyLevel = portage_gpg.TRUSTED
+				self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
+			else:
+				self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+
+		#self.root=settings["PORTDIR"]
+		self.porttree_root = os.path.realpath(porttree_root)
+
+		self.depcachedir = self.mysettings.depcachedir[:]
+
+		# PORTAGE_TMPFS is only used when it exists and is read/writable;
+		# otherwise fall back to None (aux_get then uses depcachedir).
+		self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
+		if self.tmpfs and not os.path.exists(self.tmpfs):
+			self.tmpfs = None
+		if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
+			self.tmpfs = None
+		if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
+			self.tmpfs = None
+
+		self.eclassdb = eclass_cache.cache(self.porttree_root,
+			overlays=self.mysettings["PORTDIR_OVERLAY"].split())
+
+		self.metadb = {}
+		self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
+
+		#if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
+		self.xcache={}
+		self.frozen=0
+
+		# PORTDIR first, then each overlay (realpath'd); findname2() reverses
+		# this list so overlays take precedence when locating ebuilds.
+		self.porttrees = [self.porttree_root] + \
+			[os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
+		self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
+		self.auxdb = {}
+		self._init_cache_dirs()
+		# XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
+		# ~harring
+		filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
+		for x in self.porttrees:
+			# location, label, auxdbkeys
+			self.auxdb[x] = self.auxdbmodule(self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
+		# Per-instance memo of (KEYWORDS, EAPI) lookups used by gvisible().
+		self._gvisible_aux_cache = {}
+
+	def _init_cache_dirs(self):
+		"""Create /var/cache/edb/dep and adjust permissions for the portage
+		group."""
+
+		# Python 2 octal literals: setgid group-writable dirs, group-rw files.
+		dirmode  = 02070
+		filemode =   060
+		modemask =    02
+
+		try:
+			for mydir in (self.depcachedir,):
+				if ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
+					writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
+						noiselevel=-1)
+					def onerror(e):
+						raise # bail out on the first error that occurs during recursion
+					if not apply_recursive_permissions(mydir,
+						gid=portage_gid, dirmode=dirmode, dirmask=modemask,
+						filemode=filemode, filemask=modemask, onerror=onerror):
+						raise portage_exception.OperationNotPermitted(
+							"Failed to apply recursive permissions for the portage group.")
+		except portage_exception.PortageException, e:
+			# Best-effort: cache-dir permission problems are deliberately
+			# ignored here (non-root callers hit this path routinely).
+			pass
+
+	def close_caches(self):
+		"""Sync every per-tree auxdb cache to disk, then drop them all."""
+		for x in self.auxdb.keys():
+			self.auxdb[x].sync()
+		self.auxdb.clear()
+
+	def flush_cache(self):
+		"""Discard the in-memory metadata caches without syncing them.
+
+		NOTE(review): unlike close_caches() this does not call sync() first --
+		confirm that losing unsynced auxdb entries is intended here.
+		"""
+		self.metadb = {}
+		self.auxdb = {}
+
+	def finddigest(self,mycpv):
+		"""Return the path of the digest file for mycpv, or "" on any error.
+
+		Built as <ebuild dir>/files/digest-<pkg-version>.
+		"""
+		try:
+			# Directory containing the ebuild.
+			mydig   = self.findname2(mycpv)[0]
+			mydigs  = mydig.split("/")[:-1]
+			mydig   = "/".join(mydigs)
+
+			mysplit = mycpv.split("/")
+		except SystemExit, e:
+			raise
+		except:
+			# Broad catch is deliberate: any lookup failure yields "".
+			return ""
+		return mydig+"/files/digest-"+mysplit[-1]
+
+	def findname(self,mycpv):
+		"""Return only the ebuild path portion of findname2()."""
+		return self.findname2(mycpv)[0]
+
+	def findname2(self, mycpv, mytree=None):
+		"""
+		Returns the location of the CPV, and what overlay it was in.
+		Searches overlays first, then PORTDIR; this allows us to return the first
+		matching file.  As opposed to starting in portdir and then doing overlays
+		second, we would have to exhaustively search the overlays until we found
+		the file we wanted.
+		"""
+		if not mycpv:
+			return "",0
+		mysplit=mycpv.split("/")
+		psplit=pkgsplit(mysplit[1])
+
+		if mytree:
+			mytrees = [mytree]
+		else:
+			# Reverse so overlays (appended after PORTDIR in __init__) win.
+			mytrees = self.porttrees[:]
+			mytrees.reverse()
+		if psplit:
+			for x in mytrees:
+				# <tree>/<cat>/<pn>/<pf>.ebuild
+				file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
+				if os.access(file, os.R_OK):
+					# NOTE(review): success returns a list, failure a tuple --
+					# harmless for unpacking callers, but inconsistent.
+					return[file, x]
+		return None, 0
+
+	def aux_get(self, mycpv, mylist, mytree=None):
+		"stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
+		'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
+		'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
+		global auxdbkeys,auxdbkeylen
+		cat,pkg = mycpv.split("/", 1)
+
+		# Locate the ebuild and the tree (PORTDIR or overlay) it came from.
+		myebuild, mylocation = self.findname2(mycpv, mytree)
+
+		if not myebuild:
+			# NOTE(review): noiselevel=1 here differs from the noiselevel=-1
+			# used by every other error message in this file -- confirm intent.
+			writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
+				noiselevel=1)
+			writemsg("!!! %s\n" % myebuild, noiselevel=1)
+			raise KeyError(mycpv)
+
+		# Optional GPG verification of the package Manifest (FEATURES=gpg).
+		# strict/severe escalate failures to exceptions; otherwise only warn.
+		myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
+		if "gpg" in self.mysettings.features:
+			try:
+				mys = portage_gpg.fileStats(myManifestPath)
+				# Skip re-verification when the cached stat/md5 is unchanged.
+				if (myManifestPath in self.manifestCache) and \
+				   (self.manifestCache[myManifestPath] == mys):
+					pass
+				elif self.manifestVerifier:
+					if not self.manifestVerifier.verify(myManifestPath):
+						# Verification failed the desired level.
+						raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
+
+					# Guard against the Manifest changing mid-verification.
+					if ("severe" in self.mysettings.features) and \
+					   (mys != portage_gpg.fileStats(myManifestPath)):
+						raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
+
+			except portage_exception.InvalidSignature, e:
+				if ("strict" in self.mysettings.features) or \
+				   ("severe" in self.mysettings.features):
+					raise
+				writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
+			except portage_exception.MissingSignature, e:
+				if ("severe" in self.mysettings.features):
+					raise
+				if ("strict" in self.mysettings.features):
+					if myManifestPath not in self.manifestMissingCache:
+						writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
+						self.manifestMissingCache.insert(0,myManifestPath)
+			except (OSError,portage_exception.FileNotFound), e:
+				if ("strict" in self.mysettings.features) or \
+				   ("severe" in self.mysettings.features):
+					raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
+				writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath},
+					noiselevel=-1)
+
+
+		if os.access(myebuild, os.R_OK):
+			# NOTE(review): the "stat" module is used here but is not imported
+			# at the top of this file -- NameError unless provided elsewhere.
+			emtime=os.stat(myebuild)[stat.ST_MTIME]
+		else:
+			writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
+				noiselevel=-1)
+			writemsg("!!! %s\n" % myebuild,
+				noiselevel=-1)
+			raise KeyError
+
+		# Decide whether the cached metadata is still valid: the entry must
+		# exist, its recorded mtime must match the ebuild, and any recorded
+		# eclasses must still be current.
+		try:
+			mydata = self.auxdb[mylocation][mycpv]
+			if emtime != long(mydata.get("_mtime_", 0)):
+				doregen = True
+			elif len(mydata.get("_eclasses_", [])) > 0:
+				doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
+			else:
+				doregen = False
+
+		except KeyError:
+			doregen = True
+		except CacheError:
+			# NOTE(review): CacheError is not imported in this file -- this
+			# handler would itself raise NameError if triggered; confirm the
+			# intended import (e.g. from the cache module's error classes).
+			doregen = True
+			try: del self.auxdb[mylocation][mycpv]
+			except KeyError: pass
+
+		writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
+
+		if doregen:
+			# Regenerate metadata by running the ebuild's "depend" phase and
+			# reading the key/value lines it writes to mydbkey.
+			writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
+			writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
+
+			if self.tmpfs:
+				mydbkey = self.tmpfs+"/aux_db_key_temp"
+			else:
+				mydbkey = self.depcachedir+"/aux_db_key_temp"
+
+			# The temp db key is shared, so serialize access with a lockfile.
+			mylock = None
+			try:
+				mylock = portage_locks.lockfile(mydbkey, wantnewlockfile=1)
+				try:
+					os.unlink(mydbkey)
+				except (IOError, OSError), e:
+					# NOTE(review): "errno" is referenced but never imported at
+					# the top of this file -- confirm.
+					if e.errno != errno.ENOENT:
+						raise
+					del e
+
+				self.doebuild_settings.reset()
+				myret = doebuild(myebuild, "depend", "/",
+					self.doebuild_settings, dbkey=mydbkey, tree="porttree",
+					mydbapi=self)
+				if myret != os.EX_OK:
+					#depend returned non-zero exit code...
+					writemsg((red("\naux_get():") + \
+						" (0) Error in '%s'. (%s)\n" + \
+						" Check for syntax error or " + \
+						"corruption in the ebuild. (--debug)\n\n") % \
+						(myebuild, myret), noiselevel=-1)
+					raise KeyError(mycpv)
+
+				try:
+					mycent = open(mydbkey, "r")
+					os.unlink(mydbkey)
+					mylines = mycent.readlines()
+					mycent.close()
+				except (IOError, OSError):
+					writemsg((red("\naux_get():") + \
+						" (1) Error in '%s' ebuild.\n" + \
+						" Check for syntax error or " + \
+						"corruption in the ebuild. (--debug)\n\n") % myebuild,
+						noiselevel=-1)
+					raise KeyError(mycpv)
+			finally:
+				if mylock:
+					portage_locks.unlockfile(mylock)
+
+			# The depend phase emits one value per line, in auxdbkeys order.
+			mydata = {}
+			for x in range(0,len(mylines)):
+				if mylines[x][-1] == '\n':
+					mylines[x] = mylines[x][:-1]
+				mydata[auxdbkeys[x]] = mylines[x]
+
+			if "EAPI" not in mydata or not mydata["EAPI"].strip():
+				mydata["EAPI"] = "0"
+
+			if not eapi_is_supported(mydata["EAPI"]):
+				# if newer version, wipe everything and negate eapi
+				eapi = mydata["EAPI"]
+				mydata = {}
+				map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
+				mydata["EAPI"] = "-"+eapi
+
+			# Replace the raw INHERITED list with validated eclass data so the
+			# cache can later detect stale eclasses.
+			if mydata.get("INHERITED", False):
+				mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
+			else:
+				mydata["_eclasses_"] = {}
+
+			del mydata["INHERITED"]
+
+			mydata["_mtime_"] = emtime
+
+			self.auxdb[mylocation][mycpv] = mydata
+
+		#finally, we look at our internal cache entry and return the requested data.
+		returnme = []
+		for x in mylist:
+			if x == "INHERITED":
+				# INHERITED was folded into _eclasses_; reconstruct it.
+				returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
+			else:
+				returnme.append(mydata.get(x,""))
+
+		# An empty EAPI means EAPI 0.
+		if "EAPI" in mylist:
+			idx = mylist.index("EAPI")
+			if not returnme[idx]:
+				returnme[idx] = "0"
+
+		return returnme
+
+	def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
+		"""Return [uris, files] for mypkg's SRC_URI.
+
+		uris  -- flat list of USE-reduced source URIs (all=1 ignores USE)
+		files -- corresponding unique basenames, in first-seen order
+		Exits the process if SRC_URI cannot be read via aux_get().
+		"""
+		if mysettings is None:
+			mysettings = self.mysettings
+		try:
+			myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
+		except (IOError,KeyError):
+			print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
+			sys.exit(1)
+
+		if useflags is None:
+			useflags = mysettings["USE"].split()
+
+		# Resolve the conditional (use? ( ... )) syntax in SRC_URI.
+		myurilist = paren_reduce(myuris)
+		myurilist = use_reduce(myurilist,uselist=useflags,matchall=all)
+		newuris = flatten(myurilist)
+
+		# Deduplicate by basename while preserving order.
+		myfiles = []
+		for x in newuris:
+			mya = os.path.basename(x)
+			if not mya in myfiles:
+				myfiles.append(mya)
+		return [newuris, myfiles]
+
+	def getfetchsizes(self,mypkg,useflags=None,debug=0):
+		# returns a filename:size dictionnary of remaining downloads
+		myebuild = self.findname(mypkg)
+		pkgdir = os.path.dirname(myebuild)
+		mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
+		checksums = mf.getDigests()
+		if not checksums:
+			if debug: print "[empty/missing/bad digest]: "+mypkg
+			return None
+		filesdict={}
+		# No useflags -> consider every conditional URI (all=1).
+		if useflags is None:
+			myuris, myfiles = self.getfetchlist(mypkg,all=1)
+		else:
+			myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
+		#XXX: maybe this should be improved: take partial downloads
+		# into account? check checksums?
+		for myfile in myfiles:
+			if myfile not in checksums:
+				if debug:
+					writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
+				continue
+			file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
+			mystat = None
+			try:
+				mystat = os.stat(file_path)
+			except OSError, e:
+				# Missing distfile: treated as size 0 below.
+				pass
+			if mystat is None:
+				existing_size = 0
+			else:
+				existing_size = mystat.st_size
+			remaining_size = int(checksums[myfile]["size"]) - existing_size
+			if remaining_size > 0:
+				# Assume the download is resumable.
+				filesdict[myfile] = remaining_size
+			elif remaining_size < 0:
+				# The existing file is too large and therefore corrupt.
+				filesdict[myfile] = int(checksums[myfile]["size"])
+		return filesdict
+
+	def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
+		"""Return True if every distfile of mypkg verifies against its
+		Manifest digests, False otherwise.
+
+		NOTE(review): per-file failure reasons are collected in "failures"
+		but discarded -- only the boolean is returned; confirm this is
+		intentional rather than a dropped return value.
+		"""
+		if not useflags:
+			if mysettings:
+				useflags = mysettings["USE"].split()
+		myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
+		myebuild = self.findname(mypkg)
+		pkgdir = os.path.dirname(myebuild)
+		mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
+		mysums = mf.getDigests()
+
+		failures = {}
+		for x in myfiles:
+			if not mysums or x not in mysums:
+				ok = False
+				reason = "digest missing"
+			else:
+				try:
+					ok, reason = portage_checksum.verify_all(
+						os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
+				except portage_exception.FileNotFound, e:
+					ok = False
+					reason = "File Not Found: '%s'" % str(e)
+			if not ok:
+				failures[x] = reason
+		if failures:
+			return False
+		return True
+
+ def getsize(self,mypkg,useflags=None,debug=0):
+ # returns the total size of remaining downloads
+ #
+ # we use getfetchsizes() now, so this function would be obsoleted
+ #
+ filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
+ if filesdict is None:
+ return "[empty/missing/bad digest]"
+ mysize=0
+ for myfile in filesdict.keys():
+ mysum+=filesdict[myfile]
+ return mysum
+
+	def cpv_exists(self,mykey):
+		"Tells us whether an actual ebuild exists on disk (no masking)"
+		cps2=mykey.split("/")
+		# NOTE(review): the top-of-file import reads "catpksplit" (typo); as
+		# written, catpkgsplit here would raise NameError -- confirm import.
+		cps=catpkgsplit(mykey,silent=0)
+		if not cps:
+			#invalid cat/pkg-v
+			return 0
+		if self.findname(cps[0]+"/"+cps2[1]):
+			return 1
+		else:
+			return 0
+
+	def cp_all(self):
+		"returns a list of all keys in our tree"
+		# Collect "cat/pkg" keys across every tree; the dict deduplicates
+		# packages that appear in both PORTDIR and an overlay.
+		d={}
+		for x in self.mysettings.categories:
+			for oroot in self.porttrees:
+				for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
+					d[x+"/"+y] = None
+		l = d.keys()
+		l.sort()
+		return l
+
+	def p_list(self,mycp):
+		"""Return the package-version names (no category) of every ebuild
+		found for mycp across all trees, deduplicated, unsorted."""
+		d={}
+		for oroot in self.porttrees:
+			for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
+				# Strip the ".ebuild" suffix (7 characters).
+				if x[-7:]==".ebuild":
+					d[x[:-7]] = None
+		return d.keys()
+
+	def cp_list(self, mycp, use_cache=1, mytree=None):
+		"""Return all "cat/pkg-version" cpvs for mycp, optionally restricted
+		to a single tree.  Like p_list() but with the category prefixed.
+		(use_cache is currently unused here.)"""
+		mysplit=mycp.split("/")
+		d={}
+		if mytree:
+			mytrees = [mytree]
+		else:
+			mytrees = self.porttrees
+		for oroot in mytrees:
+			for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
+				if x[-7:]==".ebuild":
+					d[mysplit[0]+"/"+x[:-7]] = None
+		return d.keys()
+
+	def freeze(self):
+		"""Assume the tree will not change: enable the xmatch() result cache
+		for the cacheable match levels."""
+		for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
+			self.xcache[x]={}
+		self.frozen=1
+
+	def melt(self):
+		"""Undo freeze(): drop the xmatch() cache and stop caching."""
+		self.xcache={}
+		self.frozen=0
+
+	def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
+		"caching match function; very trick stuff"
+		#if no updates are being made to the tree, we can consult our xcache...
+		if self.frozen:
+			try:
+				return self.xcache[level][origdep]
+			except KeyError:
+				pass
+
+		if not mydep:
+			#this stuff only runs on first call of xmatch()
+			#create mydep, mykey from origdep
+			mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
+			mykey=dep_getkey(mydep)
+
+		if level=="list-visible":
+			#a list of all visible packages, not called directly (just by xmatch())
+			#myval=self.visible(self.cp_list(mykey))
+			myval=self.gvisible(self.visible(self.cp_list(mykey)))
+		elif level=="bestmatch-visible":
+			#dep match -- best match of all visible packages
+			myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
+			#get all visible matches (from xmatch()), then choose the best one
+		elif level=="bestmatch-list":
+			#dep match -- find best match but restrict search to sublist
+			myval=best(match_from_list(mydep,mylist))
+			#no point is calling xmatch again since we're not caching list deps
+		elif level=="match-list":
+			#dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
+			myval=match_from_list(mydep,mylist)
+		elif level=="match-visible":
+			#dep match -- find all visible matches
+			myval=match_from_list(mydep,self.xmatch("list-visible",None,mydep=mydep,mykey=mykey))
+			#get all visible packages, then get the matching ones
+		elif level=="match-all":
+			#match *all* visible *and* masked packages
+			myval=match_from_list(mydep,self.cp_list(mykey))
+		else:
+			print "ERROR: xmatch doesn't handle",level,"query!"
+			raise KeyError
+		# Apply any :slot restriction from the dep atom.
+		myslot = dep_getslot(mydep)
+		if myslot is not None:
+			myval = [cpv for cpv in myval \
+				if self.aux_get(cpv, ["SLOT"])[0] == myslot]
+		if self.frozen and (level not in ["match-list","bestmatch-list"]):
+			# NOTE(review): results are stored under mydep while the lookup
+			# above uses origdep; when dep_expand() rewrites the atom the
+			# cache can never hit -- confirm whether origdep was intended.
+			self.xcache[level][mydep]=myval
+		return myval
+
+	def match(self,mydep,use_cache=1):
+		"""Return all visible cpvs matching mydep (use_cache is unused;
+		caching is governed by freeze()/melt() inside xmatch())."""
+		return self.xmatch("match-visible",mydep)
+
+	def visible(self,mylist):
+		"""two functions in one.  Accepts a list of cpv values and uses the package.mask *and*
+		packages file to remove invisible entries, returning remaining items.  This function assumes
+		that all entries in mylist have the same category and package name."""
+		if (mylist is None) or (len(mylist)==0):
+			return []
+		newlist=mylist[:]
+		#first, we mask out packages in the package.mask file
+		mykey=newlist[0]
+		cpv=catpkgsplit(mykey)
+		if not cpv:
+			#invalid cat/pkg-v
+			print "visible(): invalid cat/pkg-v:",mykey
+			return []
+		mycp=cpv[0]+"/"+cpv[1]
+		maskdict=self.mysettings.pmaskdict
+		unmaskdict=self.mysettings.punmaskdict
+		if maskdict.has_key(mycp):
+			for x in maskdict[mycp]:
+				mymatches=self.xmatch("match-all",x)
+				if mymatches is None:
+					#error in package.mask file; print warning and continue:
+					print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
+					continue
+				for y in mymatches:
+					# package.unmask entries override package.mask.
+					unmask=0
+					if unmaskdict.has_key(mycp):
+						for z in unmaskdict[mycp]:
+							mymatches_unmask=self.xmatch("match-all",z)
+							if y in mymatches_unmask:
+								unmask=1
+								break
+					if unmask==0:
+						try:
+							newlist.remove(y)
+						except ValueError:
+							pass
+
+		# Second pass: profile "packages" entries restrict which versions of
+		# system packages remain visible.
+		revmaskdict=self.mysettings.prevmaskdict
+		if revmaskdict.has_key(mycp):
+			for x in revmaskdict[mycp]:
+				#important: only match against the still-unmasked entries...
+				#notice how we pass "newlist" to the xmatch() call below....
+				#Without this, ~ deps in the packages files are broken.
+				mymatches=self.xmatch("match-list",x,mylist=newlist)
+				if mymatches is None:
+					#error in packages file; print warning and continue:
+					print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
+					continue
+				# Keep only entries matched by the packages-file atom.
+				pos=0
+				while pos<len(newlist):
+					if newlist[pos] not in mymatches:
+						del newlist[pos]
+					else:
+						pos += 1
+		return newlist
+
+	def gvisible(self,mylist):
+		"strip out group-masked (not in current group) entries"
+
+		if mylist is None:
+			return []
+		newlist=[]
+
+		accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
+		pkgdict = self.mysettings.pkeywordsdict
+		for mycpv in mylist:
+			#we need to update this next line when we have fully integrated the new db api
+			auxerr=0
+			keys = None
+			eapi = None
+			# Memoize KEYWORDS/EAPI lookups per cpv to avoid repeated
+			# aux_get() calls across gvisible() invocations.
+			aux_cache = self._gvisible_aux_cache.get(mycpv)
+			if aux_cache is not None:
+				keys, eapi = aux_cache
+			else:
+				try:
+					keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
+				except KeyError:
+					pass
+				except portage_exception.PortageException, e:
+					writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % mycpv,
+						noiselevel=-1)
+					writemsg("!!! %s\n" % str(e),
+						noiselevel=-1)
+				self._gvisible_aux_cache[mycpv] = (keys, eapi)
+			if not keys:
+				# KEYWORDS=""
+				#print "!!! No KEYWORDS for "+str(mycpv)+" -- Untested Status"
+				continue
+			mygroups=keys.split()
+			# Repoman may modify this attribute as necessary.
+			pgroups = accept_keywords[:]
+			match=0
+			cp = dep_getkey(mycpv)
+			# Apply per-package keyword overrides (package.keywords).
+			if pkgdict.has_key(cp):
+				matches = match_to_list(mycpv, pkgdict[cp].keys())
+				for atom in matches:
+					pgroups.extend(pkgdict[cp][atom])
+				if matches:
+					# Honor "-kw" removals (but never "-*") while
+					# deduplicating the accepted-keyword list.
+					inc_pgroups = []
+					for x in pgroups:
+						if x != "-*" and x.startswith("-"):
+							try:
+								inc_pgroups.remove(x[1:])
+							except ValueError:
+								pass
+						if x not in inc_pgroups:
+							inc_pgroups.append(x)
+					pgroups = inc_pgroups
+					del inc_pgroups
+			hasstable = False
+			hastesting = False
+			for gp in mygroups:
+				if gp=="*":
+					writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
+						noiselevel=-1)
+					match=1
+					break
+				elif gp in pgroups:
+					match=1
+					break
+				elif gp[0] == "~":
+					hastesting = True
+				elif gp[0] != "-":
+					hasstable = True
+			# ACCEPT_KEYWORDS of "~*" accepts any testing keyword; "*" any
+			# stable keyword.
+			if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups)):
+				match=1
+			if match and eapi_is_supported(eapi):
+				newlist.append(mycpv)
+		return newlist