author     Marius Mauch <genone@gentoo.org>  2007-02-10 18:18:48 +0000
committer  Marius Mauch <genone@gentoo.org>  2007-02-10 18:18:48 +0000
commit     17761b7794ed015d3e1ff7aa5551449537f71f89 (patch)
tree       40f8a835dc4d09e29d976ad5551d61693a0e8c56
parent     662e913455dc14125a95792fea3287cb1de27f1a (diff)
download   portage-17761b7794ed015d3e1ff7aa5551449537f71f89.tar.gz
           portage-17761b7794ed015d3e1ff7aa5551449537f71f89.tar.bz2
           portage-17761b7794ed015d3e1ff7aa5551449537f71f89.zip
Move dbapi and related classes out of __init__.py into their own subpackage.
Also perform some minor cleanups.

svn path=/main/trunk/; revision=5939
-rw-r--r--  pym/portage/__init__.py        3212
-rw-r--r--  pym/portage/dbapi/__init__.py    53
-rw-r--r--  pym/portage/dbapi/bintree.py    520
-rw-r--r--  pym/portage/dbapi/porttree.py   781
-rw-r--r--  pym/portage/dbapi/vartree.py   1837
-rw-r--r--  pym/portage/dbapi/virtual.py     89
6 files changed, 3285 insertions, 3207 deletions
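
For orientation, the classes deleted from pym/portage/__init__.py below presumably resurface in the new dbapi subpackage listed above. A hypothetical sketch of the post-commit import paths (the class-to-module mapping is inferred from the file names in the diffstat, not confirmed by this page):

    from portage.dbapi import dbapi               # base class (dbapi/__init__.py)
    from portage.dbapi.virtual import fakedbapi   # in-memory stub (virtual.py)
    from portage.dbapi.bintree import bindbapi    # binary packages (bintree.py)
    from portage.dbapi.porttree import portdbapi  # ebuild repository (porttree.py)
    from portage.dbapi.vartree import vardbapi    # installed packages (vartree.py)
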
diff --git a/pym/portage/__init__.py b/pym/portage/__init__.py
index 45914dbe2..7f0ed6fd3 100644
--- a/pym/portage/__init__.py
+++ b/pym/portage/__init__.py
@@ -4645,955 +4645,6 @@ def getmaskingstatus(mycpv, settings=None, portdb=None):
rValue.append(kmask+" keyword")
return rValue
-class portagetree:
- def __init__(self, root="/", virtual=None, clone=None, settings=None):
- """
- Constructor for a PortageTree
-
- @param root: ${ROOT}, defaults to '/', see make.conf(5)
- @type root: String/Path
- @param virtual: UNUSED
- @type virtual: No Idea
- @param clone: Set this if you want a copy of an existing portagetree
- @type clone: Existing portagetree Instance
- @param settings: Portage Configuration object (portage.settings)
- @type settings: Instance of portage.config
- """
-
- if clone:
- self.root = clone.root
- self.portroot = clone.portroot
- self.pkglines = clone.pkglines
- else:
- self.root = root
- if settings is None:
- settings = globals()["settings"]
- self.settings = settings
- self.portroot = settings["PORTDIR"]
- self.virtual = virtual
- self.dbapi = portdbapi(
- settings["PORTDIR"], mysettings=settings)
-
- def dep_bestmatch(self,mydep):
- "compatibility method"
- mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
- if mymatch is None:
- return ""
- return mymatch
-
- def dep_match(self,mydep):
- "compatibility method"
- mymatch=self.dbapi.xmatch("match-visible",mydep)
- if mymatch is None:
- return []
- return mymatch
-
- def exists_specific(self,cpv):
- return self.dbapi.cpv_exists(cpv)
-
- def getallnodes(self):
- """new behavior: these are all *unmasked* nodes. There may or may not be available
- masked package for nodes in this nodes list."""
- return self.dbapi.cp_all()
-
- def getname(self,pkgname):
- "returns file location for this particular package (DEPRECATED)"
- if not pkgname:
- return ""
- mysplit=pkgname.split("/")
- psplit=pkgsplit(mysplit[1])
- return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
-
- def resolve_specific(self,myspec):
- cps=catpkgsplit(myspec)
- if not cps:
- return None
- mykey = key_expand(cps[0]+"/"+cps[1], mydb=self.dbapi,
- settings=self.settings)
- mykey=mykey+"-"+cps[2]
- if cps[3]!="r0":
- mykey=mykey+"-"+cps[3]
- return mykey
-
- def depcheck(self,mycheck,use="yes",myusesplit=None):
- return dep_check(mycheck,self.dbapi,use=use,myuse=myusesplit)
-
- def getslot(self,mycatpkg):
- "Get a slot for a catpkg; assume it exists."
- myslot = ""
- try:
- myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
- except SystemExit, e:
- raise
- except Exception, e:
- pass
- return myslot
-
-
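
A minimal usage sketch of the compatibility API documented in the constructor above, assuming an initialized portage environment (globals()["settings"] is populated); the atoms are hypothetical:

    ptree = portagetree()                               # root="/", global settings
    best = ptree.dep_bestmatch(">=app-shells/bash-3")   # "" if nothing visible matches
    slot = ptree.getslot("app-shells/bash-3.1_p17")     # "" if the lookup fails
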
-class dbapi:
- def __init__(self):
- pass
-
- def close_caches(self):
- pass
-
- def cp_list(self,cp,use_cache=1):
- return
-
- def cpv_all(self):
- cpv_list = []
- for cp in self.cp_all():
- cpv_list.extend(self.cp_list(cp))
- return cpv_list
-
- def aux_get(self,mycpv,mylist):
- "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
- 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
- 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
- raise NotImplementedError
-
- def match(self,origdep,use_cache=1):
- mydep = dep_expand(origdep, mydb=self, settings=self.settings)
- mykey=dep_getkey(mydep)
- mylist = match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
- myslot = portage.dep.dep_getslot(mydep)
- if myslot is not None:
- mylist = [cpv for cpv in mylist \
- if self.aux_get(cpv, ["SLOT"])[0] == myslot]
- return mylist
-
- def match2(self,mydep,mykey,mylist):
- writemsg("DEPRECATED: dbapi.match2\n")
- return match_from_list(mydep,mylist)
-
- def invalidentry(self, mypath):
- if re.search("portage_lockfile$",mypath):
- if not os.environ.has_key("PORTAGE_MASTER_PID"):
- writemsg("Lockfile removed: %s\n" % mypath, 1)
- portage.locks.unlockfile((mypath,None,None))
- else:
- # Nothing we can do about it. We're probably sandboxed.
- pass
- elif re.search(".*/-MERGING-(.*)",mypath):
- if os.path.exists(mypath):
- writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n", noiselevel=-1)
- else:
- writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
-
-
-
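
The dbapi base class above is the interface this commit factors out: concrete trees supply cp_all(), cp_list() and aux_get(), and inherit match() (which needs self.settings). A hypothetical conforming subclass, using the module-level cpv_getkey() helper seen elsewhere in this file:

    class listdbapi(dbapi):
        "Hypothetical in-memory tree backed by a fixed {cpv: slot} dict."
        def __init__(self, cpvs, settings):
            self.settings = settings   # required by the inherited match()
            self._cpvs = cpvs          # e.g. {"sys-apps/foo-1.0": "0"}
        def cp_all(self):
            return list(set([cpv_getkey(cpv) for cpv in self._cpvs]))
        def cp_list(self, cp, use_cache=1):
            return [cpv for cpv in self._cpvs if cpv_getkey(cpv) == cp]
        def aux_get(self, cpv, wants):
            results = []
            for x in wants:
                if x == "SLOT":
                    results.append(self._cpvs[cpv])
                else:
                    results.append("")
            return results
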
-class fakedbapi(dbapi):
- "This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
- def __init__(self, settings=None):
- self.cpvdict={}
- self.cpdict={}
- if settings is None:
- settings = globals()["settings"]
- self.settings = settings
- self._match_cache = {}
-
- def _clear_cache(self):
- if self._match_cache:
- self._match_cache = {}
-
- def match(self, origdep, use_cache=1):
- result = self._match_cache.get(origdep, None)
- if result is not None:
- return result[:]
- result = dbapi.match(self, origdep, use_cache=use_cache)
- self._match_cache[origdep] = result
- return result[:]
-
- def cpv_exists(self,mycpv):
- return self.cpvdict.has_key(mycpv)
-
- def cp_list(self,mycp,use_cache=1):
- if not self.cpdict.has_key(mycp):
- return []
- else:
- return self.cpdict[mycp]
-
- def cp_all(self):
- returnme=[]
- for x in self.cpdict.keys():
- returnme.extend(self.cpdict[x])
- return returnme
-
- def cpv_all(self):
- return self.cpvdict.keys()
-
- def cpv_inject(self, mycpv, metadata=None):
- """Adds a cpv from the list of available packages."""
- self._clear_cache()
- mycp=cpv_getkey(mycpv)
- self.cpvdict[mycpv] = metadata
- myslot = None
- if metadata:
- myslot = metadata.get("SLOT", None)
- if myslot and mycp in self.cpdict:
- # If necessary, remove another package in the same SLOT.
- for cpv in self.cpdict[mycp]:
- if mycpv != cpv:
- other_metadata = self.cpvdict[cpv]
- if other_metadata:
- if myslot == other_metadata.get("SLOT", None):
- self.cpv_remove(cpv)
- break
- if mycp not in self.cpdict:
- self.cpdict[mycp] = []
- if not mycpv in self.cpdict[mycp]:
- self.cpdict[mycp].append(mycpv)
-
- def cpv_remove(self,mycpv):
- """Removes a cpv from the list of available packages."""
- self._clear_cache()
- mycp=cpv_getkey(mycpv)
- if self.cpvdict.has_key(mycpv):
- del self.cpvdict[mycpv]
- if not self.cpdict.has_key(mycp):
- return
- while mycpv in self.cpdict[mycp]:
- del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
- if not len(self.cpdict[mycp]):
- del self.cpdict[mycp]
-
- def aux_get(self, mycpv, wants):
- if not self.cpv_exists(mycpv):
- raise KeyError(mycpv)
- metadata = self.cpvdict[mycpv]
- if not metadata:
- return ["" for x in wants]
- return [metadata.get(x, "") for x in wants]
-
- def aux_update(self, cpv, values):
- self._clear_cache()
- self.cpvdict[cpv].update(values)
-
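
A short sketch (hypothetical cpvs) of the SLOT-collision rule in cpv_inject() above: injecting a second version with the same category/package and SLOT evicts the first:

    fakedb = fakedbapi(settings=settings)
    fakedb.cpv_inject("sys-libs/zlib-1.2.3", metadata={"SLOT": "0"})
    fakedb.cpv_inject("sys-libs/zlib-1.2.3-r1", metadata={"SLOT": "0"})
    fakedb.cp_list("sys-libs/zlib")  # ["sys-libs/zlib-1.2.3-r1"]; old slot-0 entry removed
    fakedb.match("sys-libs/zlib")    # same result, served through the match cache
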
-class bindbapi(fakedbapi):
- def __init__(self, mybintree=None, settings=None):
- self.bintree = mybintree
- self.cpvdict={}
- self.cpdict={}
- if settings is None:
- settings = globals()["settings"]
- self.settings = settings
- self._match_cache = {}
- # Selectively cache metadata in order to optimize dep matching.
- self._aux_cache_keys = set(["SLOT"])
- self._aux_cache = {}
-
- def match(self, *pargs, **kwargs):
- if self.bintree and not self.bintree.populated:
- self.bintree.populate()
- return fakedbapi.match(self, *pargs, **kwargs)
-
- def aux_get(self,mycpv,wants):
- if self.bintree and not self.bintree.populated:
- self.bintree.populate()
- cache_me = False
- if not set(wants).difference(self._aux_cache_keys):
- aux_cache = self._aux_cache.get(mycpv)
- if aux_cache is not None:
- return [aux_cache[x] for x in wants]
- cache_me = True
- mysplit = mycpv.split("/")
- mylist = []
- tbz2name = mysplit[1]+".tbz2"
- if self.bintree and not self.bintree.isremote(mycpv):
- tbz2 = portage.xpak.tbz2(self.bintree.getname(mycpv))
- getitem = tbz2.getfile
- else:
- getitem = self.bintree.remotepkgs[tbz2name].get
- mydata = {}
- mykeys = wants
- if cache_me:
- mykeys = self._aux_cache_keys.union(wants)
- for x in mykeys:
- myval = getitem(x)
- # myval is None if the key doesn't exist
- # or the tbz2 is corrupt.
- if myval:
- mydata[x] = " ".join(myval.split())
- if "EAPI" in mykeys:
- if not mydata.setdefault("EAPI", "0"):
- mydata["EAPI"] = "0"
- if cache_me:
- aux_cache = {}
- for x in self._aux_cache_keys:
- aux_cache[x] = mydata.get(x, "")
- self._aux_cache[mycpv] = aux_cache
- return [mydata.get(x, "") for x in wants]
-
- def aux_update(self, cpv, values):
- if not self.bintree.populated:
- self.bintree.populate()
- tbz2path = self.bintree.getname(cpv)
- if not os.path.exists(tbz2path):
- raise KeyError(cpv)
- mytbz2 = portage.xpak.tbz2(tbz2path)
- mydata = mytbz2.get_data()
- mydata.update(values)
- mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
-
- def cp_list(self, *pargs, **kwargs):
- if not self.bintree.populated:
- self.bintree.populate()
- return fakedbapi.cp_list(self, *pargs, **kwargs)
-
- def cpv_all(self):
- if not self.bintree.populated:
- self.bintree.populate()
- return fakedbapi.cpv_all(self)
-
-class vardbapi(dbapi):
- def __init__(self, root, categories=None, settings=None, vartree=None):
- self.root = root[:]
- #cache for category directory mtimes
- self.mtdircache = {}
- #cache for dependency checks
- self.matchcache = {}
- #cache for cp_list results
- self.cpcache = {}
- self.blockers = None
- if settings is None:
- settings = globals()["settings"]
- self.settings = settings
- if categories is None:
- categories = settings.categories
- self.categories = categories[:]
- if vartree is None:
- vartree = globals()["db"][root]["vartree"]
- self.vartree = vartree
- self._aux_cache_keys = set(["SLOT", "COUNTER", "PROVIDE", "USE",
- "IUSE", "DEPEND", "RDEPEND", "PDEPEND"])
- self._aux_cache = None
- self._aux_cache_version = "1"
- self._aux_cache_filename = os.path.join(self.root,
- CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
-
- def cpv_exists(self,mykey):
- "Tells us whether an actual ebuild exists on disk (no masking)"
- return os.path.exists(self.root+VDB_PATH+"/"+mykey)
-
- def cpv_counter(self,mycpv):
- "This method will grab the COUNTER. Returns a counter value."
- try:
- return long(self.aux_get(mycpv, ["COUNTER"])[0])
- except (KeyError, ValueError):
- pass
- cdir=self.root+VDB_PATH+"/"+mycpv
- cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
-
- # We write our new counter value to a new file that gets moved into
- # place to avoid filesystem corruption on XFS (unexpected reboot.)
- corrupted=0
- if os.path.exists(cpath):
- cfile=open(cpath, "r")
- try:
- counter=long(cfile.readline())
- except ValueError:
- print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
- counter=long(0)
- corrupted=1
- cfile.close()
- elif os.path.exists(cdir):
- mys = pkgsplit(mycpv)
- myl = self.match(mys[0],use_cache=0)
- print mys,myl
- if len(myl) == 1:
- try:
- # Only one package... Counter doesn't matter.
- write_atomic(cpath, "1")
- counter = 1
- except SystemExit, e:
- raise
- except Exception, e:
- writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
- noiselevel=-1)
- writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
- noiselevel=-1)
- writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
- writemsg("!!! %s\n" % e, noiselevel=-1)
- sys.exit(1)
- else:
- writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
- noiselevel=-1)
- writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
- noiselevel=-1)
- writemsg("!!! remerge the package.\n", noiselevel=-1)
- sys.exit(1)
- else:
- counter=long(0)
- if corrupted:
- # update new global counter file
- write_atomic(cpath, str(counter))
- return counter
-
- def cpv_inject(self,mycpv):
- "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
- os.makedirs(self.root+VDB_PATH+"/"+mycpv)
- counter = self.counter_tick(self.root, mycpv=mycpv)
- # write local package counter so that emerge clean does the right thing
- write_atomic(os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(counter))
-
- def isInjected(self,mycpv):
- if self.cpv_exists(mycpv):
- if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
- return True
- if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
- return True
- return False
-
- def move_ent(self,mylist):
- origcp=mylist[1]
- newcp=mylist[2]
-
- # sanity check
- for cp in [origcp,newcp]:
- if not (isvalidatom(cp) and isjustname(cp)):
- raise portage.exception.InvalidPackageName(cp)
- origmatches=self.match(origcp,use_cache=0)
- if not origmatches:
- return
- for mycpv in origmatches:
- mycpsplit=catpkgsplit(mycpv)
- mynewcpv=newcp+"-"+mycpsplit[2]
- mynewcat=newcp.split("/")[0]
- if mycpsplit[3]!="r0":
- mynewcpv += "-"+mycpsplit[3]
- mycpsplit_new = catpkgsplit(mynewcpv)
- origpath=self.root+VDB_PATH+"/"+mycpv
- if not os.path.exists(origpath):
- continue
- writemsg_stdout("@")
- if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
- #create the directory
- os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
- newpath=self.root+VDB_PATH+"/"+mynewcpv
- if os.path.exists(newpath):
- #dest already exists; keep this puppy where it is.
- continue
- os.rename(origpath, newpath)
-
- # We need to rename the ebuild now.
- old_pf = catsplit(mycpv)[1]
- new_pf = catsplit(mynewcpv)[1]
- if new_pf != old_pf:
- try:
- os.rename(os.path.join(newpath, old_pf + ".ebuild"),
- os.path.join(newpath, new_pf + ".ebuild"))
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- del e
- write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
-
- write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
- fixdbentries([mylist], newpath)
-
- def update_ents(self, update_iter):
- """Run fixdbentries on all installed packages (time consuming). Like
- fixpackages, this should be run from a helper script and display
- a progress indicator."""
- dbdir = os.path.join(self.root, VDB_PATH)
- for catdir in listdir(dbdir):
- catdir = dbdir+"/"+catdir
- if os.path.isdir(catdir):
- for pkgdir in listdir(catdir):
- pkgdir = catdir+"/"+pkgdir
- if os.path.isdir(pkgdir):
- fixdbentries(update_iter, pkgdir)
-
- def move_slot_ent(self,mylist):
- pkg=mylist[1]
- origslot=mylist[2]
- newslot=mylist[3]
-
- if not isvalidatom(pkg):
- raise portage.exception.InvalidAtom(pkg)
-
- origmatches=self.match(pkg,use_cache=0)
-
- if not origmatches:
- return
- for mycpv in origmatches:
- origpath=self.root+VDB_PATH+"/"+mycpv
- if not os.path.exists(origpath):
- continue
-
- slot=grabfile(origpath+"/SLOT")
- if (not slot):
- continue
-
- if (slot[0]!=origslot):
- continue
-
- writemsg_stdout("s")
- write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
-
- def cp_list(self,mycp,use_cache=1):
- mysplit=mycp.split("/")
- if mysplit[0] == '*':
- mysplit[0] = mysplit[0][1:]
- try:
- mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
- except OSError:
- mystat=0
- if use_cache and self.cpcache.has_key(mycp):
- cpc=self.cpcache[mycp]
- if cpc[0]==mystat:
- return cpc[1]
- list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
-
- if (list is None):
- return []
- returnme=[]
- for x in list:
- if x.startswith("."):
- continue
- if x[0] == '-':
- #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
- continue
- ps=pkgsplit(x)
- if not ps:
- self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
- continue
- if len(mysplit) > 1:
- if ps[0]==mysplit[1]:
- returnme.append(mysplit[0]+"/"+x)
- if use_cache:
- self.cpcache[mycp]=[mystat,returnme]
- elif self.cpcache.has_key(mycp):
- del self.cpcache[mycp]
- return returnme
-
- def cpv_all(self,use_cache=1):
- returnme=[]
- basepath = self.root+VDB_PATH+"/"
-
- for x in self.categories:
- for y in listdir(basepath+x,EmptyOnError=1):
- if y.startswith("."):
- continue
- subpath = x+"/"+y
- # -MERGING- should never be a cpv, nor should files.
- if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
- returnme += [subpath]
- return returnme
-
- def cp_all(self,use_cache=1):
- mylist = self.cpv_all(use_cache=use_cache)
- d={}
- for y in mylist:
- if y[0] == '*':
- y = y[1:]
- mysplit=catpkgsplit(y)
- if not mysplit:
- self.invalidentry(self.root+VDB_PATH+"/"+y)
- continue
- d[mysplit[0]+"/"+mysplit[1]] = None
- return d.keys()
-
- def checkblockers(self,origdep):
- pass
-
- def match(self,origdep,use_cache=1):
- "caching match function"
- mydep = dep_expand(
- origdep, mydb=self, use_cache=use_cache, settings=self.settings)
- mykey=dep_getkey(mydep)
- mycat=mykey.split("/")[0]
- if not use_cache:
- if self.matchcache.has_key(mycat):
- del self.mtdircache[mycat]
- del self.matchcache[mycat]
- mymatch = match_from_list(mydep,
- self.cp_list(mykey, use_cache=use_cache))
- myslot = portage.dep.dep_getslot(mydep)
- if myslot is not None:
- mymatch = [cpv for cpv in mymatch \
- if self.aux_get(cpv, ["SLOT"])[0] == myslot]
- return mymatch
- try:
- curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
- except (IOError, OSError):
- curmtime=0
-
- if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
- # clear cache entry
- self.mtdircache[mycat]=curmtime
- self.matchcache[mycat]={}
- if not self.matchcache[mycat].has_key(mydep):
- mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
- myslot = portage.dep.dep_getslot(mydep)
- if myslot is not None:
- mymatch = [cpv for cpv in mymatch \
- if self.aux_get(cpv, ["SLOT"])[0] == myslot]
- self.matchcache[mycat][mydep]=mymatch
- return self.matchcache[mycat][mydep][:]
-
- def findname(self, mycpv):
- return self.root+VDB_PATH+"/"+str(mycpv)+"/"+mycpv.split("/")[1]+".ebuild"
-
- def flush_cache(self):
- """If the current user has permission and the internal aux_get cache has
- been updated, save it to disk and mark it unmodified. This is called
- by emerge after it has loaded the full vdb for use in dependency
- calculations. Currently, the cache is only written if the user has
- superuser privileges (since that's required to obtain a lock), but all
- users have read access and benefit from faster metadata lookups (as
- long as at least part of the cache is still valid)."""
- if self._aux_cache is not None and \
- self._aux_cache["modified"] and \
- secpass >= 2:
- valid_nodes = set(self.cpv_all())
- for cpv in self._aux_cache["packages"].keys():
- if cpv not in valid_nodes:
- del self._aux_cache["packages"][cpv]
- del self._aux_cache["modified"]
- try:
- f = atomic_ofstream(self._aux_cache_filename)
- cPickle.dump(self._aux_cache, f, -1)
- f.close()
- portage.util.apply_secpass_permissions(
- self._aux_cache_filename, gid=portage_gid, mode=0644)
- except (IOError, OSError), e:
- pass
- self._aux_cache["modified"] = False
-
- def aux_get(self, mycpv, wants):
- """This automatically caches selected keys that are frequently needed
- by emerge for dependency calculations. The cached metadata is
- considered valid if the mtime of the package directory has not changed
- since the data was cached. The cache is stored in a pickled dict
- object with the following format:
-
- {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
-
- If an error occurs while loading the cache pickle or the version is
- unrecognized, the cache will simple be recreated from scratch (it is
- completely disposable).
- """
- if not self._aux_cache_keys.intersection(wants):
- return self._aux_get(mycpv, wants)
- if self._aux_cache is None:
- try:
- f = open(self._aux_cache_filename)
- mypickle = cPickle.Unpickler(f)
- mypickle.find_global = None
- self._aux_cache = mypickle.load()
- f.close()
- del f
- except (IOError, OSError, EOFError, cPickle.UnpicklingError):
- pass
- if not self._aux_cache or \
- not isinstance(self._aux_cache, dict) or \
- self._aux_cache.get("version") != self._aux_cache_version or \
- not self._aux_cache.get("packages"):
- self._aux_cache = {"version":self._aux_cache_version}
- self._aux_cache["packages"] = {}
- self._aux_cache["modified"] = False
- mydir = os.path.join(self.root, VDB_PATH, mycpv)
- mydir_stat = None
- try:
- mydir_stat = os.stat(mydir)
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- raise KeyError(mycpv)
- mydir_mtime = long(mydir_stat.st_mtime)
- pkg_data = self._aux_cache["packages"].get(mycpv)
- mydata = {}
- cache_valid = False
- if pkg_data:
- cache_mtime, metadata = pkg_data
- cache_valid = cache_mtime == mydir_mtime
- if cache_valid and set(metadata) != self._aux_cache_keys:
- # Allow self._aux_cache_keys to change without a cache version
- # bump.
- cache_valid = False
- if cache_valid:
- mydata.update(metadata)
- pull_me = set(wants).difference(self._aux_cache_keys)
- else:
- pull_me = self._aux_cache_keys.union(wants)
- if pull_me:
- # pull any needed data and cache it
- aux_keys = list(pull_me)
- for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
- mydata[k] = v
- if not cache_valid:
- cache_data = {}
- for aux_key in self._aux_cache_keys:
- cache_data[aux_key] = mydata[aux_key]
- self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
- self._aux_cache["modified"] = True
- return [mydata[x] for x in wants]
-
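
To make the docstring's layout concrete, a hypothetical vdb_metadata.pickle payload for a single installed package might look like this (the metadata keys track self._aux_cache_keys; the long is the package directory's mtime):

    {"version": "1",
     "modified": False,
     "packages": {
         "sys-apps/portage-2.1.2": (1171234567L,
             {"SLOT": "0", "COUNTER": "42", "PROVIDE": "", "USE": "x86",
              "IUSE": "build doc", "DEPEND": "", "RDEPEND": "", "PDEPEND": ""})}}
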
- def _aux_get(self, mycpv, wants):
- mydir = os.path.join(self.root, VDB_PATH, mycpv)
- try:
- if not stat.S_ISDIR(os.stat(mydir).st_mode):
- raise KeyError(mycpv)
- except OSError, e:
- if e.errno == errno.ENOENT:
- raise KeyError(mycpv)
- del e
- raise
- results = []
- for x in wants:
- try:
- myf = open(os.path.join(mydir, x), "r")
- try:
- myd = myf.read()
- finally:
- myf.close()
- myd = " ".join(myd.split())
- except IOError:
- myd = ""
- if x == "EAPI" and not myd:
- results.append("0")
- else:
- results.append(myd)
- return results
-
- def aux_update(self, cpv, values):
- cat, pkg = cpv.split("/")
- mylink = dblink(cat, pkg, self.root, self.settings,
- treetype="vartree", vartree=self.vartree)
- if not mylink.exists():
- raise KeyError(cpv)
- for k, v in values.iteritems():
- mylink.setfile(k, v)
-
- def counter_tick(self,myroot,mycpv=None):
- return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
-
- def get_counter_tick_core(self,myroot,mycpv=None):
- return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
-
- def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
- "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
- cpath=myroot+"var/cache/edb/counter"
- changed=0
- min_counter = 0
- if mycpv:
- mysplit = pkgsplit(mycpv)
- for x in self.match(mysplit[0],use_cache=0):
- if x==mycpv:
- continue
- try:
- old_counter = long(self.aux_get(x,["COUNTER"])[0])
- writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
- except (ValueError, KeyError): # ValueError from long(), KeyError from aux_get
- old_counter = 0
- writemsg("!!! BAD COUNTER in '%s'\n" % (x), noiselevel=-1)
- if old_counter > min_counter:
- min_counter = old_counter
-
- # We write our new counter value to a new file that gets moved into
- # place to avoid filesystem corruption.
- find_counter = ("find '%s' -type f -name COUNTER | " + \
- "while read f; do echo $(<\"${f}\"); done | " + \
- "sort -n | tail -n1") % os.path.join(self.root, VDB_PATH)
- if os.path.exists(cpath):
- cfile=open(cpath, "r")
- try:
- counter=long(cfile.readline())
- except (ValueError,OverflowError):
- try:
- counter = long(commands.getoutput(find_counter).strip())
- writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter,
- noiselevel=-1)
- changed=1
- except (ValueError,OverflowError):
- writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n",
- noiselevel=-1)
- writemsg("!!! corrected/normalized so that portage can operate properly.\n",
- noiselevel=-1)
- writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
- sys.exit(2)
- cfile.close()
- else:
- try:
- counter = long(commands.getoutput(find_counter).strip())
- writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter,
- noiselevel=-1)
- except ValueError: # Value Error for long(), probably others for commands.getoutput
- writemsg("!!! Initializing global counter.\n", noiselevel=-1)
- counter=long(0)
- changed=1
-
- if counter < min_counter:
- counter = min_counter+1000
- changed = 1
-
- if incrementing or changed:
-
- #increment counter
- counter += 1
- # update new global counter file
- write_atomic(cpath, str(counter))
- return counter
-
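
A worked example of the recovery path above (hypothetical numbers): if the global counter file reads 100 but the highest COUNTER among other installed versions is 150, then min_counter = 150, so counter becomes 150 + 1000 = 1150 and changed is set; an incrementing call then returns 1151 and writes it back atomically.
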
-class vartree(object):
- "this tree will scan a var/db/pkg database located at root (passed to init)"
- def __init__(self, root="/", virtual=None, clone=None, categories=None,
- settings=None):
- if clone:
- self.root = clone.root[:]
- self.dbapi = copy.deepcopy(clone.dbapi)
- self.populated = 1
- self.settings = config(clone=clone.settings)
- else:
- self.root = root[:]
- if settings is None:
- settings = globals()["settings"]
- self.settings = settings # for key_expand calls
- if categories is None:
- categories = settings.categories
- self.dbapi = vardbapi(self.root, categories=categories,
- settings=settings, vartree=self)
- self.populated = 1
-
- def zap(self,mycpv):
- return
-
- def inject(self,mycpv):
- return
-
- def get_provide(self,mycpv):
- myprovides=[]
- mylines = None
- try:
- mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE","USE"])
- if mylines:
- myuse = myuse.split()
- mylines = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(mylines), uselist=myuse))
- for myprovide in mylines:
- mys = catpkgsplit(myprovide)
- if not mys:
- mys = myprovide.split("/")
- myprovides += [mys[0] + "/" + mys[1]]
- return myprovides
- except SystemExit, e:
- raise
- except Exception, e:
- mydir = os.path.join(self.root, VDB_PATH, mycpv)
- writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
- noiselevel=-1)
- if mylines:
- writemsg("Possibly Invalid: '%s'\n" % str(mylines),
- noiselevel=-1)
- writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
- return []
-
- def get_all_provides(self):
- myprovides = {}
- for node in self.getallcpv():
- for mykey in self.get_provide(node):
- if myprovides.has_key(mykey):
- myprovides[mykey] += [node]
- else:
- myprovides[mykey] = [node]
- return myprovides
-
- def dep_bestmatch(self,mydep,use_cache=1):
- "compatibility method -- all matches, not just visible ones"
- #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
- mymatch = best(self.dbapi.match(
- dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
- use_cache=use_cache))
- if mymatch is None:
- return ""
- else:
- return mymatch
-
- def dep_match(self,mydep,use_cache=1):
- "compatibility method -- we want to see all matches, not just visible ones"
- #mymatch=match(mydep,self.dbapi)
- mymatch=self.dbapi.match(mydep,use_cache=use_cache)
- if mymatch is None:
- return []
- else:
- return mymatch
-
- def exists_specific(self,cpv):
- return self.dbapi.cpv_exists(cpv)
-
- def getallcpv(self):
- """temporary function, probably to be renamed --- Gets a list of all
- category/package-versions installed on the system."""
- return self.dbapi.cpv_all()
-
- def getallnodes(self):
- """new behavior: these are all *unmasked* nodes. There may or may not be available
- masked package for nodes in this nodes list."""
- return self.dbapi.cp_all()
-
- def exists_specific_cat(self,cpv,use_cache=1):
- cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
- settings=self.settings)
- a=catpkgsplit(cpv)
- if not a:
- return 0
- mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
- for x in mylist:
- b=pkgsplit(x)
- if not b:
- self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
- continue
- if a[1]==b[0]:
- return 1
- return 0
-
- def getebuildpath(self,fullpackage):
- cat,package=fullpackage.split("/")
- return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
-
- def getnode(self,mykey,use_cache=1):
- mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
- settings=self.settings)
- if not mykey:
- return []
- mysplit=mykey.split("/")
- mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
- returnme=[]
- for x in mydirlist:
- mypsplit=pkgsplit(x)
- if not mypsplit:
- self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
- continue
- if mypsplit[0]==mysplit[1]:
- appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
- returnme.append(appendme)
- return returnme
-
-
- def getslot(self,mycatpkg):
- "Get a slot for a catpkg; assume it exists."
- try:
- return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
- except KeyError:
- return ""
-
- def hasnode(self,mykey,use_cache):
- """Does the particular node (cat/pkg key) exist?"""
- mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
- settings=self.settings)
- mysplit=mykey.split("/")
- mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
- for x in mydirlist:
- mypsplit=pkgsplit(x)
- if not mypsplit:
- self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
- continue
- if mypsplit[0]==mysplit[1]:
- return 1
- return 0
-
- def populate(self):
- self.populated=1
auxdbkeys=[
'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
@@ -5605,2264 +4656,11 @@ auxdbkeys=[
]
auxdbkeylen=len(auxdbkeys)
-def close_portdbapi_caches():
- for i in portdbapi.portdbapi_instances:
- i.close_caches()
-
-
-class portdbapi(dbapi):
- """this tree will scan a portage directory located at root (passed to init)"""
- portdbapi_instances = []
-
- def __init__(self,porttree_root,mysettings=None):
- portdbapi.portdbapi_instances.append(self)
-
- if mysettings:
- self.mysettings = mysettings
- else:
- global settings
- self.mysettings = config(clone=settings)
-
- # This is strictly for use in aux_get() doebuild calls when metadata
- # is generated by the depend phase. It's safest to use a clone for
- # this purpose because doebuild makes many changes to the config
- # instance that is passed in.
- self.doebuild_settings = config(clone=self.mysettings)
-
- self.manifestVerifyLevel = None
- self.manifestVerifier = None
- self.manifestCache = {} # {location: [stat, md5]}
- self.manifestMissingCache = []
-
- if "gpg" in self.mysettings.features:
- self.manifestVerifyLevel = portage.gpg.EXISTS
- if "strict" in self.mysettings.features:
- self.manifestVerifyLevel = portage.gpg.MARGINAL
- self.manifestVerifier = portage.gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
- elif "severe" in self.mysettings.features:
- self.manifestVerifyLevel = portage.gpg.TRUSTED
- self.manifestVerifier = portage.gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
- else:
- self.manifestVerifier = portage.gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
-
- #self.root=settings["PORTDIR"]
- self.porttree_root = os.path.realpath(porttree_root)
-
- self.depcachedir = self.mysettings.depcachedir[:]
-
- self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
- if self.tmpfs and not os.path.exists(self.tmpfs):
- self.tmpfs = None
- if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
- self.tmpfs = None
- if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
- self.tmpfs = None
-
- self.eclassdb = portage.eclass_cache.cache(self.porttree_root,
- overlays=self.mysettings["PORTDIR_OVERLAY"].split())
-
- self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
-
- #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
- self.xcache={}
- self.frozen=0
-
- self.porttrees = [self.porttree_root] + \
- [os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
- self.treemap = {}
- for path in self.porttrees:
- repo_name_path = os.path.join( path, REPO_NAME_LOC )
- try:
- repo_name = open( repo_name_path ,'r').readline().strip()
- self.treemap[repo_name] = path
- except (OSError,IOError):
- pass
-
- self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
- self.auxdb = {}
- self._init_cache_dirs()
- # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
- # ~harring
- filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
- if secpass < 1:
- from portage.cache import metadata_overlay, volatile
- for x in self.porttrees:
- db_ro = self.auxdbmodule(self.depcachedir, x,
- filtered_auxdbkeys, gid=portage_gid, readonly=True)
- self.auxdb[x] = metadata_overlay.database(
- self.depcachedir, x, filtered_auxdbkeys,
- gid=portage_gid, db_rw=volatile.database,
- db_ro=db_ro)
- else:
- for x in self.porttrees:
- # location, label, auxdbkeys
- self.auxdb[x] = self.auxdbmodule(
- self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
- # Selectively cache metadata in order to optimize dep matching.
- self._aux_cache_keys = set(["EAPI", "KEYWORDS", "SLOT"])
- self._aux_cache = {}
-
- def _init_cache_dirs(self):
- """Create /var/cache/edb/dep and adjust permissions for the portage
- group."""
-
- dirmode = 02070
- filemode = 060
- modemask = 02
-
- try:
- for mydir in (self.depcachedir,):
- if portage.util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
- writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
- noiselevel=-1)
- def onerror(e):
- raise # bail out on the first error that occurs during recursion
- if not apply_recursive_permissions(mydir,
- gid=portage_gid, dirmode=dirmode, dirmask=modemask,
- filemode=filemode, filemask=modemask, onerror=onerror):
- raise portage.exception.OperationNotPermitted(
- "Failed to apply recursive permissions for the portage group.")
- except portage.exception.PortageException, e:
- pass
-
- def close_caches(self):
- for x in self.auxdb.keys():
- self.auxdb[x].sync()
- self.auxdb.clear()
-
- def flush_cache(self):
- for x in self.auxdb.values():
- x.sync()
-
- def finddigest(self,mycpv):
- try:
- mydig = self.findname2(mycpv)[0]
- if not mydig:
- return ""
- mydigs = mydig.split("/")[:-1]
- mydig = "/".join(mydigs)
- mysplit = mycpv.split("/")
- except OSError:
- return ""
- return mydig+"/files/digest-"+mysplit[-1]
-
- def findname(self,mycpv):
- return self.findname2(mycpv)[0]
-
- def getRepositoryPath( self, repository_id ):
- """
- This function is required for GLEP 42 compliance; given a valid repository ID
- it must return a path to the repository.
- TreeMap = { id: path }
- """
- if repository_id in self.treemap:
- return self.treemap[repository_id]
- return None
-
- def getRepositories( self ):
- """
- This function is required for GLEP 42 compliance; it will return a list of
- repository IDs.
- TreeMap = { id: path }
- """
- return [k for k in self.treemap.keys() if k]
-
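
A usage sketch for the two GLEP 42 helpers above, assuming overlays that ship the repo_name file read in __init__() (presumably profiles/repo_name; the repository names are hypothetical):

    portdb = portdbapi(settings["PORTDIR"], mysettings=settings)
    portdb.getRepositories()             # e.g. ["gentoo", "my-overlay"]
    portdb.getRepositoryPath("gentoo")   # e.g. "/usr/portage", or None if unknown
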
- def findname2(self, mycpv, mytree=None):
- """
- Returns the location of the CPV, and what overlay it was in.
- Searches overlays first, then PORTDIR; this allows us to return the first
- matching file. If we started in PORTDIR and searched the overlays second,
- we would have to exhaustively search the overlays anyway before knowing
- which file takes precedence.
- """
- if not mycpv:
- return "",0
- mysplit=mycpv.split("/")
- psplit=pkgsplit(mysplit[1])
-
- if mytree:
- mytrees = [mytree]
- else:
- mytrees = self.porttrees[:]
- mytrees.reverse()
- if psplit:
- for x in mytrees:
- file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
- if os.access(file, os.R_OK):
- return [file, x]
- return None, 0
-
- def aux_get(self, mycpv, mylist, mytree=None):
- "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
- 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
- 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
- cache_me = False
- if not mytree and not set(mylist).difference(self._aux_cache_keys):
- aux_cache = self._aux_cache.get(mycpv)
- if aux_cache is not None:
- return [aux_cache[x] for x in mylist]
- cache_me = True
- global auxdbkeys,auxdbkeylen
- cat,pkg = mycpv.split("/", 1)
-
- myebuild, mylocation = self.findname2(mycpv, mytree)
-
- if not myebuild:
- writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
- noiselevel=1)
- writemsg("!!! %s\n" % myebuild, noiselevel=1)
- raise KeyError(mycpv)
-
- myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
- if "gpg" in self.mysettings.features:
- try:
- mys = portage.gpg.fileStats(myManifestPath)
- if (myManifestPath in self.manifestCache) and \
- (self.manifestCache[myManifestPath] == mys):
- pass
- elif self.manifestVerifier:
- if not self.manifestVerifier.verify(myManifestPath):
- # Verification failed the desired level.
- raise portage.exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
-
- if ("severe" in self.mysettings.features) and \
- (mys != portage.gpg.fileStats(myManifestPath)):
- raise portage.exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
-
- except portage.exception.InvalidSignature, e:
- if ("strict" in self.mysettings.features) or \
- ("severe" in self.mysettings.features):
- raise
- writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
- except portage.exception.MissingSignature, e:
- if ("severe" in self.mysettings.features):
- raise
- if ("strict" in self.mysettings.features):
- if myManifestPath not in self.manifestMissingCache:
- writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
- self.manifestMissingCache.insert(0,myManifestPath)
- except (OSError,portage.exception.FileNotFound), e:
- if ("strict" in self.mysettings.features) or \
- ("severe" in self.mysettings.features):
- raise portage.exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
- writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath},
- noiselevel=-1)
-
-
- if os.access(myebuild, os.R_OK):
- emtime=os.stat(myebuild)[stat.ST_MTIME]
- else:
- writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
- noiselevel=-1)
- writemsg("!!! %s\n" % myebuild,
- noiselevel=-1)
- raise KeyError
-
- try:
- mydata = self.auxdb[mylocation][mycpv]
- if emtime != long(mydata.get("_mtime_", 0)):
- doregen = True
- elif len(mydata.get("_eclasses_", [])) > 0:
- doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
- else:
- doregen = False
-
- except KeyError:
- doregen = True
- except CacheError:
- doregen = True
- try: del self.auxdb[mylocation][mycpv]
- except KeyError: pass
-
- writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
-
- if doregen:
- writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
- writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
-
- self.doebuild_settings.reset()
- mydata = {}
- myret = doebuild(myebuild, "depend",
- self.doebuild_settings["ROOT"], self.doebuild_settings,
- dbkey=mydata, tree="porttree", mydbapi=self)
- if myret != os.EX_OK:
- raise KeyError(mycpv)
-
- if "EAPI" not in mydata or not mydata["EAPI"].strip():
- mydata["EAPI"] = "0"
-
- if not eapi_is_supported(mydata["EAPI"]):
- # if newer version, wipe everything and negate eapi
- eapi = mydata["EAPI"]
- mydata = {}
- map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
- mydata["EAPI"] = "-"+eapi
-
- if mydata.get("INHERITED", False):
- mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
- else:
- mydata["_eclasses_"] = {}
-
- del mydata["INHERITED"]
-
- mydata["_mtime_"] = emtime
-
- self.auxdb[mylocation][mycpv] = mydata
-
- if not mydata.setdefault("EAPI", "0"):
- mydata["EAPI"] = "0"
-
- #finally, we look at our internal cache entry and return the requested data.
- returnme = []
- for x in mylist:
- if x == "INHERITED":
- returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
- else:
- returnme.append(mydata.get(x,""))
-
- if cache_me:
- aux_cache = {}
- for x in self._aux_cache_keys:
- aux_cache[x] = mydata.get(x, "")
- self._aux_cache[mycpv] = aux_cache
-
- return returnme
-
- def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
- if mysettings is None:
- mysettings = self.mysettings
- try:
- myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
- except KeyError:
- print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
- sys.exit(1)
-
- if useflags is None:
- useflags = mysettings["USE"].split()
-
- myurilist = portage.dep.paren_reduce(myuris)
- myurilist = portage.dep.use_reduce(myurilist,uselist=useflags,matchall=all)
- newuris = flatten(myurilist)
-
- myfiles = []
- for x in newuris:
- mya = os.path.basename(x)
- if not mya in myfiles:
- myfiles.append(mya)
- return [newuris, myfiles]
-
- def getfetchsizes(self,mypkg,useflags=None,debug=0):
- # returns a filename:size dictionary of remaining downloads
- myebuild = self.findname(mypkg)
- pkgdir = os.path.dirname(myebuild)
- mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
- checksums = mf.getDigests()
- if not checksums:
- if debug: print "[empty/missing/bad digest]: "+mypkg
- return None
- filesdict={}
- if useflags is None:
- myuris, myfiles = self.getfetchlist(mypkg,all=1)
- else:
- myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
- #XXX: maybe this should be improved: take partial downloads
- # into account? check checksums?
- for myfile in myfiles:
- if myfile not in checksums:
- if debug:
- writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
- continue
- file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
- mystat = None
- try:
- mystat = os.stat(file_path)
- except OSError, e:
- pass
- if mystat is None:
- existing_size = 0
- else:
- existing_size = mystat.st_size
- remaining_size = int(checksums[myfile]["size"]) - existing_size
- if remaining_size > 0:
- # Assume the download is resumable.
- filesdict[myfile] = remaining_size
- elif remaining_size < 0:
- # The existing file is too large and therefore corrupt.
- filesdict[myfile] = int(checksums[myfile]["size"])
- return filesdict
-
- def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
- if not useflags:
- if mysettings:
- useflags = mysettings["USE"].split()
- myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
- myebuild = self.findname(mypkg)
- pkgdir = os.path.dirname(myebuild)
- mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
- mysums = mf.getDigests()
-
- failures = {}
- for x in myfiles:
- if not mysums or x not in mysums:
- ok = False
- reason = "digest missing"
- else:
- try:
- ok, reason = portage.checksum.verify_all(
- os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
- except portage.exception.FileNotFound, e:
- ok = False
- reason = "File Not Found: '%s'" % str(e)
- if not ok:
- failures[x] = reason
- if failures:
- return False
- return True
-
- def getsize(self,mypkg,useflags=None,debug=0):
- # returns the total size of remaining downloads
- #
- # we use getfetchsizes() now, so this function is obsolete
- #
- filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
- if filesdict is None:
- return "[empty/missing/bad digest]"
- mysize=0
- for myfile in filesdict.keys():
- mysize+=filesdict[myfile]
- return mysize
-
- def cpv_exists(self,mykey):
- "Tells us whether an actual ebuild exists on disk (no masking)"
- cps2=mykey.split("/")
- cps=catpkgsplit(mykey,silent=0)
- if not cps:
- #invalid cat/pkg-v
- return 0
- if self.findname(cps[0]+"/"+cps2[1]):
- return 1
- else:
- return 0
-
- def cp_all(self):
- "returns a list of all keys in our tree"
- d={}
- for x in self.mysettings.categories:
- for oroot in self.porttrees:
- for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
- d[x+"/"+y] = None
- l = d.keys()
- l.sort()
- return l
-
- def p_list(self,mycp):
- d={}
- for oroot in self.porttrees:
- for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
- if x[-7:]==".ebuild":
- d[x[:-7]] = None
- return d.keys()
-
- def cp_list(self, mycp, use_cache=1, mytree=None):
- mysplit=mycp.split("/")
- d={}
- if mytree:
- mytrees = [mytree]
- else:
- mytrees = self.porttrees
- for oroot in mytrees:
- for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
- if x.endswith(".ebuild"):
- pf = x[:-7]
- ps = pkgsplit(pf)
- if not ps:
- writemsg("\nInvalid ebuild name: %s\n" % \
- os.path.join(oroot, mycp, x), noiselevel=-1)
- continue
- d[mysplit[0]+"/"+pf] = None
- return d.keys()
-
- def freeze(self):
- for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
- self.xcache[x]={}
- self.frozen=1
-
- def melt(self):
- self.xcache={}
- self.frozen=0
-
- def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
- "caching match function; very trick stuff"
- #if no updates are being made to the tree, we can consult our xcache...
- if self.frozen:
- try:
- return self.xcache[level][origdep][:]
- except KeyError:
- pass
-
- if not mydep:
- #this stuff only runs on first call of xmatch()
- #create mydep, mykey from origdep
- mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
- mykey=dep_getkey(mydep)
-
- if level=="list-visible":
- #a list of all visible packages, not called directly (just by xmatch())
- #myval=self.visible(self.cp_list(mykey))
- myval=self.gvisible(self.visible(self.cp_list(mykey)))
- elif level=="bestmatch-visible":
- #dep match -- best match of all visible packages
- myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
- #get all visible matches (from xmatch()), then choose the best one
- elif level=="bestmatch-list":
- #dep match -- find best match but restrict search to sublist
- myval=best(match_from_list(mydep,mylist))
- #no point in calling xmatch again since we're not caching list deps
- elif level=="match-list":
- #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
- myval=match_from_list(mydep,mylist)
- elif level=="match-visible":
- #dep match -- find all visible matches
- myval = match_from_list(mydep,
- self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey))
- #get all visible packages, then get the matching ones
- elif level=="match-all":
- #match *all* visible *and* masked packages
- myval=match_from_list(mydep,self.cp_list(mykey))
- else:
- print "ERROR: xmatch doesn't handle",level,"query!"
- raise KeyError
- myslot = portage.dep.dep_getslot(mydep)
- if myslot is not None:
- slotmatches = []
- for cpv in myval:
- try:
- if self.aux_get(cpv, ["SLOT"])[0] == myslot:
- slotmatches.append(cpv)
- except KeyError:
- pass # ebuild masked by corruption
- myval = slotmatches
- if self.frozen and (level not in ["match-list","bestmatch-list"]):
- self.xcache[level][mydep]=myval
- if origdep and origdep != mydep:
- self.xcache[level][origdep] = myval
- return myval[:]
-
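
The level strings xmatch() accepts, each with a hypothetical call (results naturally depend on the tree):

    portdb.xmatch("match-all", "app-shells/bash")              # visible and masked cpvs
    portdb.xmatch("match-visible", "app-shells/bash")          # what portdb.match() returns
    portdb.xmatch("bestmatch-visible", ">=app-shells/bash-3")  # single best cpv, or ""
    portdb.xmatch("bestmatch-list", "app-shells/bash", mylist=candidates)  # best within a sublist
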
- def match(self,mydep,use_cache=1):
- return self.xmatch("match-visible",mydep)
-
- def visible(self,mylist):
- """two functions in one. Accepts a list of cpv values and uses the package.mask *and*
- packages file to remove invisible entries, returning remaining items. This function assumes
- that all entries in mylist have the same category and package name."""
- if (mylist is None) or (len(mylist)==0):
- return []
- newlist=mylist[:]
- #first, we mask out packages in the package.mask file
- mykey=newlist[0]
- cpv=catpkgsplit(mykey)
- if not cpv:
- #invalid cat/pkg-v
- print "visible(): invalid cat/pkg-v:",mykey
- return []
- mycp=cpv[0]+"/"+cpv[1]
- maskdict=self.mysettings.pmaskdict
- unmaskdict=self.mysettings.punmaskdict
- if maskdict.has_key(mycp):
- for x in maskdict[mycp]:
- mymatches=self.xmatch("match-all",x)
- if mymatches is None:
- #error in package.mask file; print warning and continue:
- print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
- continue
- for y in mymatches:
- unmask=0
- if unmaskdict.has_key(mycp):
- for z in unmaskdict[mycp]:
- mymatches_unmask=self.xmatch("match-all",z)
- if y in mymatches_unmask:
- unmask=1
- break
- if unmask==0:
- try:
- newlist.remove(y)
- except ValueError:
- pass
-
- revmaskdict=self.mysettings.prevmaskdict
- if revmaskdict.has_key(mycp):
- for x in revmaskdict[mycp]:
- #important: only match against the still-unmasked entries...
- #notice how we pass "newlist" to the xmatch() call below....
- #Without this, ~ deps in the packages files are broken.
- mymatches=self.xmatch("match-list",x,mylist=newlist)
- if mymatches is None:
- #error in packages file; print warning and continue:
- print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
- continue
- pos=0
- while pos<len(newlist):
- if newlist[pos] not in mymatches:
- del newlist[pos]
- else:
- pos += 1
- return newlist
-
- def gvisible(self,mylist):
- "strip out group-masked (not in current group) entries"
-
- if mylist is None:
- return []
- newlist=[]
-
- accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
- pkgdict = self.mysettings.pkeywordsdict
- for mycpv in mylist:
- try:
- keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
- except KeyError:
- continue
- except portage.exception.PortageException, e:
- writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % \
- mycpv, noiselevel=-1)
- writemsg("!!! %s\n" % str(e), noiselevel=-1)
- del e
- continue
- mygroups=keys.split()
- # Repoman may modify this attribute as necessary.
- pgroups = accept_keywords[:]
- match=0
- cp = dep_getkey(mycpv)
- if pkgdict.has_key(cp):
- matches = match_to_list(mycpv, pkgdict[cp].keys())
- for atom in matches:
- pgroups.extend(pkgdict[cp][atom])
- if matches:
- inc_pgroups = []
- for x in pgroups:
- # The -* special case should be removed once the tree
- # is clean of KEYWORDS=-* crap
- if x != "-*" and x.startswith("-"):
- try:
- inc_pgroups.remove(x[1:])
- except ValueError:
- pass
- if x not in inc_pgroups:
- inc_pgroups.append(x)
- pgroups = inc_pgroups
- del inc_pgroups
- hasstable = False
- hastesting = False
- for gp in mygroups:
- if gp=="*":
- writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
- noiselevel=-1)
- match=1
- break
- elif gp in pgroups:
- match=1
- break
- elif gp[0] == "~":
- hastesting = True
- elif gp[0] != "-":
- hasstable = True
- if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups) or "**" in pgroups):
- match=1
- if match and eapi_is_supported(eapi):
- newlist.append(mycpv)
- return newlist
-
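
A worked example of the keyword filter above (hypothetical values): with ACCEPT_KEYWORDS="x86" and no package.keywords entries, a package with KEYWORDS="~x86" only sets hastesting and is dropped; if package.keywords (pkeywordsdict) adds "~x86" to pgroups for a matching atom, the gp in pgroups test succeeds and the cpv is kept, provided its EAPI is supported.
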
-class binarytree(object):
- "this tree scans for a list of all packages available in PKGDIR"
- def __init__(self, root, pkgdir, virtual=None, settings=None, clone=None):
- if clone:
- # XXX This isn't cloning. It's an instance of the same thing.
- self.root=clone.root
- self.pkgdir=clone.pkgdir
- self.dbapi=clone.dbapi
- self.populated=clone.populated
- self.tree=clone.tree
- self.remotepkgs=clone.remotepkgs
- self.invalids=clone.invalids
- self.settings = clone.settings
- else:
- self.root=root
- #self.pkgdir=settings["PKGDIR"]
- self.pkgdir = normalize_path(pkgdir)
- self.dbapi = bindbapi(self, settings=settings)
- self.populated=0
- self.tree={}
- self.remotepkgs={}
- self.invalids=[]
- self.settings = settings
- self._pkg_paths = {}
-
- def move_ent(self,mylist):
- if not self.populated:
- self.populate()
- origcp=mylist[1]
- newcp=mylist[2]
- # sanity check
- for cp in [origcp,newcp]:
- if not (isvalidatom(cp) and isjustname(cp)):
- raise portage.exception.InvalidPackageName(cp)
- origcat = origcp.split("/")[0]
- mynewcat=newcp.split("/")[0]
- origmatches=self.dbapi.cp_list(origcp)
- if not origmatches:
- return
- for mycpv in origmatches:
-
- mycpsplit=catpkgsplit(mycpv)
- mynewcpv=newcp+"-"+mycpsplit[2]
- if mycpsplit[3]!="r0":
- mynewcpv += "-"+mycpsplit[3]
- myoldpkg=mycpv.split("/")[1]
- mynewpkg=mynewcpv.split("/")[1]
-
- if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
- writemsg("!!! Cannot update binary: Destination exists.\n",
- noiselevel=-1)
- writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
- continue
-
- tbz2path=self.getname(mycpv)
- if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
- writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
- noiselevel=-1)
- continue
-
- #print ">>> Updating data in:",mycpv
- writemsg_stdout("%")
- mytbz2 = portage.xpak.tbz2(tbz2path)
- mydata = mytbz2.get_data()
- updated_items = update_dbentries([mylist], mydata)
- mydata.update(updated_items)
- mydata["CATEGORY"] = mynewcat+"\n"
- if mynewpkg != myoldpkg:
- mydata[mynewpkg+".ebuild"] = mydata[myoldpkg+".ebuild"]
- del mydata[myoldpkg+".ebuild"]
- mydata["PF"] = mynewpkg + "\n"
- mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
-
- self.dbapi.cpv_remove(mycpv)
- del self._pkg_paths[mycpv]
- new_path = self.getname(mynewcpv)
- self._pkg_paths[mynewcpv] = os.path.join(
- *new_path.split(os.path.sep)[-2:])
- if new_path != tbz2path:
- try:
- os.makedirs(os.path.dirname(new_path))
- except OSError, e:
- if e.errno != errno.EEXIST:
- raise
- del e
- os.rename(tbz2path, new_path)
- self._remove_symlink(mycpv)
- if new_path.split(os.path.sep)[-2] == "All":
- self._create_symlink(mynewcpv)
- self.dbapi.cpv_inject(mynewcpv)
-
- return 1
-
- def _remove_symlink(self, cpv):
- """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
- the ${PKGDIR}/${CATEGORY} directory if empty. The file will not be
- removed if os.path.islink() returns False."""
- mycat, mypkg = catsplit(cpv)
- mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
- if os.path.islink(mylink):
- """Only remove it if it's really a link so that this method never
- removes a real package that was placed here to avoid a collision."""
- os.unlink(mylink)
- try:
- os.rmdir(os.path.join(self.pkgdir, mycat))
- except OSError, e:
- if e.errno not in (errno.ENOENT,
- errno.ENOTEMPTY, errno.EEXIST):
- raise
- del e
-
- def _create_symlink(self, cpv):
- """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
- ${PKGDIR}/${CATEGORY} directory, if necessary). Any file that may
- exist in the location of the symlink will first be removed."""
- mycat, mypkg = catsplit(cpv)
- full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
- try:
- os.makedirs(os.path.dirname(full_path))
- except OSError, e:
- if e.errno != errno.EEXIST:
- raise
- del e
- try:
- os.unlink(full_path)
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- del e
- os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
-
- def move_slot_ent(self, mylist):
- if not self.populated:
- self.populate()
- pkg=mylist[1]
- origslot=mylist[2]
- newslot=mylist[3]
-
- if not isvalidatom(pkg):
- raise portage.exception.InvalidAtom(pkg)
-
- origmatches=self.dbapi.match(pkg)
- if not origmatches:
- return
- for mycpv in origmatches:
- mycpsplit=catpkgsplit(mycpv)
- myoldpkg=mycpv.split("/")[1]
- tbz2path=self.getname(mycpv)
- if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
- writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
- noiselevel=-1)
- continue
-
- #print ">>> Updating data in:",mycpv
- mytbz2 = portage.xpak.tbz2(tbz2path)
- mydata = mytbz2.get_data()
-
- slot = mydata["SLOT"]
- if (not slot):
- continue
-
- if (slot[0]!=origslot):
- continue
-
- writemsg_stdout("S")
- mydata["SLOT"] = newslot+"\n"
- mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
- return 1
-
- def update_ents(self, update_iter):
- if len(update_iter) == 0:
- return
- if not self.populated:
- self.populate()
-
- for mycpv in self.dbapi.cp_all():
- tbz2path=self.getname(mycpv)
- if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
- writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
- noiselevel=-1)
- continue
- #print ">>> Updating binary data:",mycpv
- writemsg_stdout("*")
- mytbz2 = portage.xpak.tbz2(tbz2path)
- mydata = mytbz2.get_data()
- updated_items = update_dbentries(update_iter, mydata)
- if len(updated_items) > 0:
- mydata.update(updated_items)
- mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
- return 1
-
- def prevent_collision(self, cpv):
- """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
- use for a given cpv. If a collision will occur with an existing
- package from another category, the existing package will be bumped to
- ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
- full_path = self.getname(cpv)
- if "All" == full_path.split(os.path.sep)[-2]:
- return
- """Move a colliding package if it exists. Code below this point only
- executes in rare cases."""
- mycat, mypkg = catsplit(cpv)
- myfile = mypkg + ".tbz2"
- mypath = os.path.join("All", myfile)
- dest_path = os.path.join(self.pkgdir, mypath)
- if os.path.exists(dest_path):
- # For invalid packages, other_cat could be None.
- other_cat = portage.xpak.tbz2(dest_path).getfile("CATEGORY")
- if other_cat:
- other_cat = other_cat.strip()
- self._move_from_all(other_cat + "/" + mypkg)
- """The file may or may not exist. Move it if necessary and update
- internal state for future calls to getname()."""
- self._move_to_all(cpv)
-
- def _move_to_all(self, cpv):
- """If the file exists, move it. Whether or not it exists, update state
- for future getname() calls."""
- mycat , mypkg = catsplit(cpv)
- myfile = mypkg + ".tbz2"
- src_path = os.path.join(self.pkgdir, mycat, myfile)
- try:
- mystat = os.lstat(src_path)
- except OSError, e:
- mystat = None
- if mystat and stat.S_ISREG(mystat.st_mode):
- try:
- os.makedirs(os.path.join(self.pkgdir, "All"))
- except OSError, e:
- if e.errno != errno.EEXIST:
- raise
- del e
- os.rename(src_path, os.path.join(self.pkgdir, "All", myfile))
- self._create_symlink(cpv)
- self._pkg_paths[cpv] = os.path.join("All", myfile)
-
- def _move_from_all(self, cpv):
- """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
-		${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state for future getname() calls."""
- self._remove_symlink(cpv)
- mycat , mypkg = catsplit(cpv)
- myfile = mypkg + ".tbz2"
- mypath = os.path.join(mycat, myfile)
- dest_path = os.path.join(self.pkgdir, mypath)
- try:
- os.makedirs(os.path.dirname(dest_path))
- except OSError, e:
- if e.errno != errno.EEXIST:
- raise
- del e
- os.rename(os.path.join(self.pkgdir, "All", myfile), dest_path)
- self._pkg_paths[cpv] = mypath
-
- def populate(self, getbinpkgs=0,getbinpkgsonly=0):
- "populates the binarytree"
- if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
- return 0
- if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
- return 0
-
- if not getbinpkgsonly:
- pkg_paths = {}
- dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
- if "All" in dirs:
- dirs.remove("All")
- dirs.sort()
- dirs.insert(0, "All")
- for mydir in dirs:
- for myfile in listdir(os.path.join(self.pkgdir, mydir)):
- if not myfile.endswith(".tbz2"):
- continue
- mypath = os.path.join(mydir, myfile)
- full_path = os.path.join(self.pkgdir, mypath)
- if os.path.islink(full_path):
- continue
- mytbz2 = portage.xpak.tbz2(full_path)
- # For invalid packages, mycat could be None.
- mycat = mytbz2.getfile("CATEGORY")
- mypf = mytbz2.getfile("PF")
- mypkg = myfile[:-5]
- if not mycat or not mypf:
- #old-style or corrupt package
- writemsg("!!! Invalid binary package: '%s'\n" % full_path,
- noiselevel=-1)
- writemsg("!!! This binary package is not " + \
- "recoverable and should be deleted.\n",
- noiselevel=-1)
- self.invalids.append(mypkg)
- continue
- mycat = mycat.strip()
- if mycat != mydir and mydir != "All":
- continue
- if mypkg != mypf.strip():
- continue
- mycpv = mycat + "/" + mypkg
- if mycpv in pkg_paths:
- # All is first, so it's preferred.
- continue
- pkg_paths[mycpv] = mypath
- self.dbapi.cpv_inject(mycpv)
- self._pkg_paths = pkg_paths
-
- if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
- writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
- noiselevel=-1)
-
- if getbinpkgs and \
- self.settings["PORTAGE_BINHOST"] and not self.remotepkgs:
- try:
- chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
- if chunk_size < 8:
- chunk_size = 8
- except (ValueError, KeyError):
- chunk_size = 3000
-
-			writemsg(green("Fetching binary package info...\n"))
- self.remotepkgs = portage.getbinpkg.dir_get_metadata(
- self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
- writemsg(green(" -- DONE!\n\n"))
-
- for mypkg in self.remotepkgs.keys():
- if not self.remotepkgs[mypkg].has_key("CATEGORY"):
- #old-style or corrupt package
- writemsg("!!! Invalid remote binary package: "+mypkg+"\n",
- noiselevel=-1)
- del self.remotepkgs[mypkg]
- continue
- mycat=self.remotepkgs[mypkg]["CATEGORY"].strip()
- fullpkg=mycat+"/"+mypkg[:-5]
- mykey=dep_getkey(fullpkg)
- try:
- # invalid tbz2's can hurt things.
- #print "cpv_inject("+str(fullpkg)+")"
- self.dbapi.cpv_inject(fullpkg)
- #print " -- Injected"
- except SystemExit, e:
- raise
- except:
- writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n",
- noiselevel=-1)
- del self.remotepkgs[mypkg]
- continue
- self.populated=1
-
- def inject(self,cpv):
- return self.dbapi.cpv_inject(cpv)
-
- def exists_specific(self,cpv):
- if not self.populated:
- self.populate()
- return self.dbapi.match(
- dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
-
- def dep_bestmatch(self,mydep):
- "compatibility method -- all matches, not just visible ones"
- if not self.populated:
- self.populate()
- writemsg("\n\n", 1)
- writemsg("mydep: %s\n" % mydep, 1)
- mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
- writemsg("mydep: %s\n" % mydep, 1)
- mykey=dep_getkey(mydep)
- writemsg("mykey: %s\n" % mykey, 1)
- mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
- writemsg("mymatch: %s\n" % mymatch, 1)
- if mymatch is None:
- return ""
- return mymatch
-
- def getname(self,pkgname):
- """Returns a file location for this package. The default location is
- ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
- in the rare event of a collision. The prevent_collision() method can
- be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
- specific cpv."""
- if not self.populated:
- self.populate()
- mycpv = pkgname
- mypath = self._pkg_paths.get(mycpv, None)
- if mypath:
- return os.path.join(self.pkgdir, mypath)
- mycat, mypkg = catsplit(mycpv)
- mypath = os.path.join("All", mypkg + ".tbz2")
- if mypath in self._pkg_paths.values():
- mypath = os.path.join(mycat, mypkg + ".tbz2")
- self._pkg_paths[mycpv] = mypath # cache for future lookups
- return os.path.join(self.pkgdir, mypath)
-
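# Editor's note: a hedged illustration (hypothetical names, not part of this
# commit) of the path policy getname() implements above:
#   bintree.getname("app-misc/foo-1.0")  ->  ${PKGDIR}/All/foo-1.0.tbz2
# unless another category already claims All/foo-1.0.tbz2, in which case the
# fallback location ${PKGDIR}/app-misc/foo-1.0.tbz2 is returned and cached.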
- def isremote(self,pkgname):
- "Returns true if the package is kept remotely."
- mysplit=pkgname.split("/")
- remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
- return remote
-
- def get_use(self,pkgname):
- mysplit=pkgname.split("/")
- if self.isremote(pkgname):
- return self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:].split()
- tbz2=portage.xpak.tbz2(self.getname(pkgname))
- return tbz2.getfile("USE").split()
-
- def gettbz2(self,pkgname):
- "fetches the package from a remote site, if necessary."
- print "Fetching '"+str(pkgname)+"'"
- mysplit = pkgname.split("/")
- tbz2name = mysplit[1]+".tbz2"
- if not self.isremote(pkgname):
- if (tbz2name not in self.invalids):
- return
- else:
- writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n",
- noiselevel=-1)
- mydest = self.pkgdir+"/All/"
- try:
- os.makedirs(mydest, 0775)
- except (OSError, IOError):
- pass
- return portage.getbinpkg.file_get(
- self.settings["PORTAGE_BINHOST"] + "/" + tbz2name,
- mydest, fcmd=self.settings["RESUMECOMMAND"])
-
- def getslot(self,mycatpkg):
- "Get a slot for a catpkg; assume it exists."
- myslot = ""
- try:
- myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
- except SystemExit, e:
- raise
- except Exception, e:
- pass
- return myslot
-
-class dblink:
- """
- This class provides an interface to the installed package database
- At present this is implemented as a text backend in /var/db/pkg.
- """
- def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
- vartree=None):
- """
- Creates a DBlink object for a given CPV.
- The given CPV may not be present in the database already.
-
- @param cat: Category
- @type cat: String
- @param pkg: Package (PV)
- @type pkg: String
- @param myroot: Typically ${ROOT}
- @type myroot: String (Path)
- @param mysettings: Typically portage.config
- @type mysettings: An instance of portage.config
- @param treetype: one of ['porttree','bintree','vartree']
- @type treetype: String
- @param vartree: an instance of vartree corresponding to myroot.
- @type vartree: vartree
- """
-
- self.cat = cat
- self.pkg = pkg
- self.mycpv = self.cat+"/"+self.pkg
- self.mysplit = pkgsplit(self.mycpv)
- self.treetype = treetype
- if vartree is None:
- global db
- vartree = db[myroot]["vartree"]
- self.vartree = vartree
-
- self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
- self.dbcatdir = self.dbroot+"/"+cat
- self.dbpkgdir = self.dbcatdir+"/"+pkg
- self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
- self.dbdir = self.dbpkgdir
-
- self._lock_vdb = None
-
- self.settings = mysettings
- if self.settings==1:
- raise ValueError
-
- self.myroot=myroot
- protect_obj = portage.util.ConfigProtect(myroot,
- mysettings.get("CONFIG_PROTECT","").split(),
- mysettings.get("CONFIG_PROTECT_MASK","").split())
- self.updateprotect = protect_obj.updateprotect
- self._config_protect = protect_obj
- self._installed_instance = None
- self.contentscache=[]
- self._contents_inodes = None
-
- def lockdb(self):
- if self._lock_vdb:
- raise AssertionError("Lock already held.")
- # At least the parent needs to exist for the lock file.
- portage.util.ensure_dirs(self.dbroot)
- self._lock_vdb = portage.locks.lockdir(self.dbroot)
-
- def unlockdb(self):
- if self._lock_vdb:
- portage.locks.unlockdir(self._lock_vdb)
- self._lock_vdb = None
-
- def getpath(self):
- "return path to location of db information (for >>> informational display)"
- return self.dbdir
-
- def exists(self):
- "does the db entry exist? boolean."
- return os.path.exists(self.dbdir)
-
- def create(self):
- "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
- """
- This function should never get called (there is no reason to use it).
- """
- # XXXXX Delete this eventually
- raise Exception, "This is bad. Don't use it."
- if not os.path.exists(self.dbdir):
- os.makedirs(self.dbdir)
-
- def delete(self):
- """
- Remove this entry from the database
- """
- if not os.path.exists(self.dbdir):
- return
- try:
- for x in listdir(self.dbdir):
- os.unlink(self.dbdir+"/"+x)
- os.rmdir(self.dbdir)
- except OSError, e:
- print "!!! Unable to remove db entry for this package."
- print "!!! It is possible that a directory is in this one. Portage will still"
- print "!!! register this package as installed as long as this directory exists."
- print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
- print "!!! "+str(e)
- print
- sys.exit(1)
-
- def clearcontents(self):
- """
- For a given db entry (self), erase the CONTENTS values.
- """
- if os.path.exists(self.dbdir+"/CONTENTS"):
- os.unlink(self.dbdir+"/CONTENTS")
-
- def getcontents(self):
- """
- Get the installed files of a given package (aka what that package installed)
- """
- if not os.path.exists(self.dbdir+"/CONTENTS"):
- return None
- if self.contentscache != []:
- return self.contentscache
- pkgfiles={}
- myc=open(self.dbdir+"/CONTENTS","r")
- mylines=myc.readlines()
- myc.close()
- null_byte = "\0"
- contents_file = os.path.join(self.dbdir, "CONTENTS")
- pos = 0
- for line in mylines:
- pos += 1
- if null_byte in line:
- # Null bytes are a common indication of corruption.
- writemsg("!!! Null byte found in contents " + \
- "file, line %d: '%s'\n" % (pos, contents_file),
- noiselevel=-1)
- continue
- mydat = line.split()
- # we do this so we can remove from non-root filesystems
- # (use the ROOT var to allow maintenance on other partitions)
- try:
- mydat[1] = normalize_path(os.path.join(
- self.myroot, mydat[1].lstrip(os.path.sep)))
- if mydat[0]=="obj":
- #format: type, mtime, md5sum
- pkgfiles[" ".join(mydat[1:-2])]=[mydat[0], mydat[-1], mydat[-2]]
- elif mydat[0]=="dir":
- #format: type
- pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
- elif mydat[0]=="sym":
- #format: type, mtime, dest
- x=len(mydat)-1
- if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
- mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
- writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
- x=len(mydat)-1
- splitter=-1
- while(x>=0):
- if mydat[x]=="->":
- splitter=x
- break
- x=x-1
- if splitter==-1:
- return None
- pkgfiles[" ".join(mydat[1:splitter])]=[mydat[0], mydat[-1], " ".join(mydat[(splitter+1):-1])]
- elif mydat[0]=="dev":
- #format: type
- pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
- elif mydat[0]=="fif":
- #format: type
- pkgfiles[" ".join(mydat[1:])]=[mydat[0]]
- else:
- return None
- except (KeyError,IndexError):
- print "portage: CONTENTS line",pos,"corrupt!"
- self.contentscache=pkgfiles
- return pkgfiles
-
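# Editor's note: a minimal standalone sketch (not part of this commit) of the
# on-disk CONTENTS format that getcontents() parses above. Field layout per
# entry type:
#   obj <path> <md5> <mtime>
#   sym <path> -> <target> <mtime>
#   dir|dev|fif <path>
def parse_contents_line(line):
	fields = line.split()
	if fields[0] == "obj":
		# The path may contain spaces, so rejoin everything between the
		# type field and the trailing md5/mtime fields.
		return (" ".join(fields[1:-2]), ["obj", fields[-1], fields[-2]])
	if fields[0] == "sym":
		arrow = fields.index("->")
		return (" ".join(fields[1:arrow]),
			["sym", fields[-1], " ".join(fields[arrow + 1:-1])])
	return (" ".join(fields[1:]), [fields[0]])

# Hypothetical entry:
# parse_contents_line("obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1170000000")
# -> ('/usr/bin/foo', ['obj', '1170000000', 'd41d8cd98f00b204e9800998ecf8427e'])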
- def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
- ldpath_mtimes=None):
- """
- Calls prerm
- Unmerges a given package (CPV)
- calls postrm
- calls cleanrm
- calls env_update
-
- @param pkgfiles: files to unmerge (generally self.getcontents() )
- @type pkgfiles: Dictionary
- @param trimworld: Remove CPV from world file if True, not if False
- @type trimworld: Boolean
- @param cleanup: cleanup to pass to doebuild (see doebuild)
- @type cleanup: Boolean
- @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
- @type ldpath_mtimes: Dictionary
- @rtype: Integer
- @returns:
- 1. os.EX_OK if everything went well.
- 2. return code of the failed phase (for prerm, postrm, cleanrm)
-
- Notes:
- The caller must ensure that lockdb() and unlockdb() are called
- before and after this method.
- """
-
- contents = self.getcontents()
- # Now, don't assume that the name of the ebuild is the same as the
- # name of the dir; the package may have been moved.
- myebuildpath = None
- mystuff = listdir(self.dbdir, EmptyOnError=1)
- for x in mystuff:
- if x.endswith(".ebuild"):
- myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
- if x[:-7] != self.pkg:
- # Clean up after vardbapi.move_ent() breakage in
- # portage versions before 2.1.2
- os.rename(os.path.join(self.dbdir, x), myebuildpath)
- write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
- break
-
- self.settings.load_infodir(self.dbdir)
- if myebuildpath:
- try:
- doebuild_environment(myebuildpath, "prerm", self.myroot,
- self.settings, 0, 0, self.vartree.dbapi)
- except portage.exception.UnsupportedAPIException, e:
- # Sometimes this happens due to corruption of the EAPI file.
- writemsg("!!! FAILED prerm: %s\n" % \
- os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
- writemsg("%s\n" % str(e), noiselevel=-1)
- return 1
- catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
- portage.util.ensure_dirs(os.path.dirname(catdir),
- uid=portage_uid, gid=portage_gid, mode=070, mask=0)
- builddir_lock = None
- catdir_lock = None
- try:
- if myebuildpath:
- catdir_lock = portage.locks.lockdir(catdir)
- portage.util.ensure_dirs(catdir,
- uid=portage_uid, gid=portage_gid,
- mode=070, mask=0)
- builddir_lock = portage.locks.lockdir(
- self.settings["PORTAGE_BUILDDIR"])
- try:
- portage.locks.unlockdir(catdir_lock)
- finally:
- catdir_lock = None
- # Eventually, we'd like to pass in the saved ebuild env here...
- retval = doebuild(myebuildpath, "prerm", self.myroot,
- self.settings, cleanup=cleanup, use_cache=0,
- mydbapi=self.vartree.dbapi, tree="vartree",
- vartree=self.vartree)
- # XXX: Decide how to handle failures here.
- if retval != os.EX_OK:
- writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
- return retval
-
- self._unmerge_pkgfiles(pkgfiles)
-
- if myebuildpath:
- retval = doebuild(myebuildpath, "postrm", self.myroot,
- self.settings, use_cache=0, tree="vartree",
- mydbapi=self.vartree.dbapi, vartree=self.vartree)
-
- # process logs created during pre/postrm
- elog_process(self.mycpv, self.settings)
-
- # XXX: Decide how to handle failures here.
- if retval != os.EX_OK:
- writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
- return retval
- doebuild(myebuildpath, "cleanrm", self.myroot, self.settings,
- tree="vartree", mydbapi=self.vartree.dbapi,
- vartree=self.vartree)
-
- finally:
- if builddir_lock:
- portage.locks.unlockdir(builddir_lock)
- try:
- if myebuildpath and not catdir_lock:
- # Lock catdir for removal if empty.
- catdir_lock = portage.locks.lockdir(catdir)
- finally:
- if catdir_lock:
- try:
- os.rmdir(catdir)
- except OSError, e:
- if e.errno not in (errno.ENOENT,
- errno.ENOTEMPTY, errno.EEXIST):
- raise
- del e
- portage.locks.unlockdir(catdir_lock)
- env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
- contents=contents)
- return os.EX_OK
-
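# Editor's note: a hedged usage sketch (not part of this commit) of the
# locking contract stated in unmerge()'s docstring above: the caller, not
# unmerge() itself, must hold the vdb lock. "mylink" is a hypothetical
# dblink instance.
def locked_unmerge(mylink, ldpath_mtimes=None):
	mylink.lockdb()
	try:
		# getcontents() supplies the file list recorded at merge time.
		return mylink.unmerge(pkgfiles=mylink.getcontents(),
			ldpath_mtimes=ldpath_mtimes)
	finally:
		mylink.unlockdb()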
- def _unmerge_pkgfiles(self, pkgfiles):
- """
-
- Unmerges the contents of a package from the liveFS
- Removes the VDB entry for self
-
- @param pkgfiles: typically self.getcontents()
- @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
- @rtype: None
- """
- global dircache
- dircache={}
-
- if not pkgfiles:
- writemsg_stdout("No package files given... Grabbing a set.\n")
- pkgfiles=self.getcontents()
-
- if pkgfiles:
- mykeys=pkgfiles.keys()
- mykeys.sort()
- mykeys.reverse()
-
- #process symlinks second-to-last, directories last.
- mydirs=[]
- modprotect="/lib/modules/"
- for objkey in mykeys:
- obj = normalize_path(objkey)
- if obj[:2]=="//":
- obj=obj[1:]
- statobj = None
- try:
- statobj = os.stat(obj)
- except OSError:
- pass
- lstatobj = None
- try:
- lstatobj = os.lstat(obj)
- except (OSError, AttributeError):
- pass
- islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
- if statobj is None:
- if not islink:
- #we skip this if we're dealing with a symlink
- #because os.stat() will operate on the
- #link target rather than the link itself.
- writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
- continue
- # next line includes a tweak to protect modules from being unmerged,
- # but we don't protect modules from being overwritten if they are
- # upgraded. We effectively only want one half of the config protection
- # functionality for /lib/modules. For portage-ng both capabilities
- # should be able to be independently specified.
- if obj.startswith(modprotect):
- writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
- continue
-
- lmtime=str(lstatobj[stat.ST_MTIME])
- if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
- writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
- continue
-
- if pkgfiles[objkey][0]=="dir":
- if statobj is None or not stat.S_ISDIR(statobj.st_mode):
- writemsg_stdout("--- !dir %s %s\n" % ("dir", obj))
- continue
- mydirs.append(obj)
- elif pkgfiles[objkey][0]=="sym":
- if not islink:
- writemsg_stdout("--- !sym %s %s\n" % ("sym", obj))
- continue
- try:
- os.unlink(obj)
- writemsg_stdout("<<< %s %s\n" % ("sym",obj))
- except (OSError,IOError),e:
- writemsg_stdout("!!! %s %s\n" % ("sym",obj))
- elif pkgfiles[objkey][0]=="obj":
- if statobj is None or not stat.S_ISREG(statobj.st_mode):
- writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
- continue
- mymd5 = None
- try:
- mymd5 = portage.checksum.perform_md5(obj, calc_prelink=1)
- except portage.exception.FileNotFound, e:
- # the file has disappeared between now and our stat call
- writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
- continue
-
-					# lower() is needed because db entries used to be in upper-case;
-					# lower-casing the stored md5 preserves backwards compatibility.
- if mymd5 != pkgfiles[objkey][2].lower():
- writemsg_stdout("--- !md5 %s %s\n" % ("obj", obj))
- continue
- try:
- os.unlink(obj)
- except (OSError,IOError),e:
- pass
- writemsg_stdout("<<< %s %s\n" % ("obj",obj))
- elif pkgfiles[objkey][0]=="fif":
- if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
- writemsg_stdout("--- !fif %s %s\n" % ("fif", obj))
- continue
- writemsg_stdout("--- %s %s\n" % ("fif",obj))
- elif pkgfiles[objkey][0]=="dev":
- writemsg_stdout("--- %s %s\n" % ("dev",obj))
-
- mydirs.sort()
- mydirs.reverse()
-
- for obj in mydirs:
- try:
- os.rmdir(obj)
- writemsg_stdout("<<< %s %s\n" % ("dir",obj))
- except (OSError, IOError):
- writemsg_stdout("--- !empty dir %s\n" % obj)
-
- #remove self from vartree database so that our own virtual gets zapped if we're the last node
- self.vartree.zap(self.mycpv)
-
- def isowner(self,filename,destroot):
- """
- Check if filename is a new file or belongs to this package
- (for this or a previous version)
-
- @param filename:
- @type filename:
- @param destroot:
- @type destroot:
- @rtype: Boolean
- @returns:
- 1. True if this package owns the file.
- 2. False if this package does not own the file.
- """
- destfile = normalize_path(
- os.path.join(destroot, filename.lstrip(os.path.sep)))
- try:
- mylstat = os.lstat(destfile)
- except (OSError, IOError):
- return True
-
- pkgfiles = self.getcontents()
- if pkgfiles and filename in pkgfiles:
- return True
- if pkgfiles:
- if self._contents_inodes is None:
- self._contents_inodes = set()
- for x in pkgfiles:
- try:
- lstat = os.lstat(x)
- self._contents_inodes.add((lstat.st_dev, lstat.st_ino))
- except OSError:
- pass
- if (mylstat.st_dev, mylstat.st_ino) in self._contents_inodes:
- return True
-
- return False
-
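# Editor's note: a minimal sketch (not part of this commit) of the
# device/inode comparison isowner() relies on above: two paths that reach
# the same file through differently symlinked directories still agree on
# (st_dev, st_ino), so inode identity disambiguates ownership.
import os

def same_file(path_a, path_b):
	a = os.lstat(path_a)
	b = os.lstat(path_b)
	return (a.st_dev, a.st_ino) == (b.st_dev, b.st_ino)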
- def isprotected(self, filename):
- """In cases where an installed package in the same slot owns a
- protected file that will be merged, bump the mtime on the installed
- file in order to ensure that it isn't unmerged."""
- if not self._config_protect.isprotected(filename):
- return False
- if self._installed_instance is None:
- return True
- mydata = self._installed_instance.getcontents().get(filename, None)
- if mydata is None:
- return True
-
- # Bump the mtime in order to ensure that the old config file doesn't
- # get unmerged. The user will have an opportunity to merge the new
- # config with the old one.
- try:
- os.utime(filename, None)
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- del e
- # The file has disappeared, so it's not protected.
- return False
- return True
-
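# Editor's note: a tiny sketch (not part of this commit) of the mtime-bump
# trick isprotected() uses above: touching the installed config file moves
# its mtime past the value recorded in CONTENTS, so unmerging the old
# package sees a "modified" file and leaves it in place.
import os

def bump_mtime(path):
	os.utime(path, None)  # set atime/mtime to the current time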
- def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
- mydbapi=None, prev_mtimes=None):
- """
-
- This function does the following:
-
- Collision Protection.
- calls doebuild(mydo=pkg_preinst)
- Merges the package to the livefs
- unmerges old version (if required)
- calls doebuild(mydo=pkg_postinst)
- calls env_update
-
- @param srcroot: Typically this is ${D}
- @type srcroot: String (Path)
- @param destroot: Path to merge to (usually ${ROOT})
- @type destroot: String (Path)
- @param inforoot: root of the vardb entry ?
- @type inforoot: String (Path)
- @param myebuild: path to the ebuild that we are processing
- @type myebuild: String (Path)
- @param mydbapi: dbapi which is handed to doebuild.
- @type mydbapi: portdbapi instance
- @param prev_mtimes: { Filename:mtime } mapping for env_update
- @type prev_mtimes: Dictionary
-		@rtype: Integer
- @returns:
- 1. 0 on success
- 2. 1 on failure
-
- secondhand is a list of symlinks that have been skipped due to their target
- not existing; we will merge these symlinks at a later time.
- """
- if not os.path.isdir(srcroot):
- writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
- noiselevel=-1)
- return 1
-
- if not os.path.exists(self.dbcatdir):
- os.makedirs(self.dbcatdir)
-
- otherversions=[]
- for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
- otherversions.append(v.split("/")[1])
-
- slot_matches = self.vartree.dbapi.match(
- "%s:%s" % (self.mysplit[0], self.settings["SLOT"]))
- if slot_matches:
- # Used by self.isprotected().
- self._installed_instance = dblink(self.cat,
- catsplit(slot_matches[0])[1], destroot, self.settings,
- vartree=self.vartree)
-
- # check for package collisions
- if "collision-protect" in self.settings.features:
- collision_ignore = set([normalize_path(myignore) for myignore in \
- self.settings.get("COLLISION_IGNORE", "").split()])
- myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
-
- # the linkcheck only works if we are in srcroot
- mycwd = getcwd()
- os.chdir(srcroot)
- mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
- myfilelist.extend(mysymlinks)
- mysymlinked_directories = [s + os.path.sep for s in mysymlinks]
- del mysymlinks
-
-
- stopmerge=False
- starttime=time.time()
- i=0
-
- otherpkg=[]
- mypkglist=[]
-
- if self.pkg in otherversions:
- otherversions.remove(self.pkg) # we already checked this package
-
- myslot = self.settings["SLOT"]
- for v in otherversions:
- # only allow versions with same slot to overwrite files
- if myslot == self.vartree.dbapi.aux_get("/".join((self.cat, v)), ["SLOT"])[0]:
- mypkglist.append(
- dblink(self.cat, v, destroot, self.settings,
- vartree=self.vartree))
-
- collisions = []
-
- print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
- for f in myfilelist:
- nocheck = False
- # listdir isn't intelligent enough to exclude symlinked dirs,
-				# so we have to do it ourselves
- for s in mysymlinked_directories:
- if f.startswith(s):
- nocheck = True
- break
- if nocheck:
- continue
- i=i+1
- if i % 1000 == 0:
- print str(i)+" files checked ..."
- if f[0] != "/":
- f="/"+f
- isowned = False
- for ver in [self]+mypkglist:
- if (ver.isowner(f, destroot) or ver.isprotected(f)):
- isowned = True
- break
- if not isowned:
- collisions.append(f)
- print "existing file "+f+" is not owned by this package"
- stopmerge=True
- if collision_ignore:
- if f in collision_ignore:
- stopmerge = False
- else:
- for myignore in collision_ignore:
- if f.startswith(myignore + os.path.sep):
- stopmerge = False
- break
- #print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
- if stopmerge:
- print red("*")+" This package is blocked because it wants to overwrite"
- print red("*")+" files belonging to other packages (see messages above)."
-			print red("*")+" If you have no clue what this is all about, report it "
- print red("*")+" as a bug for this package on http://bugs.gentoo.org"
- print
- print red("package "+self.cat+"/"+self.pkg+" NOT merged")
- print
- print
- print "Searching all installed packages for file collisions..."
- print "Press Ctrl-C to Stop"
- print
- """ Note: The isowner calls result in a stat call for *every*
- single installed file, since the inode numbers are used to work
- around the problem of ambiguous paths caused by symlinked files
- and/or directories. Though it is slow, it is as accurate as
- possible."""
- found_owner = False
- for cpv in self.vartree.dbapi.cpv_all():
- cat, pkg = catsplit(cpv)
- mylink = dblink(cat, pkg, destroot, self.settings,
- vartree=self.vartree)
- mycollisions = []
- for f in collisions:
- if mylink.isowner(f, destroot):
- mycollisions.append(f)
- if mycollisions:
- found_owner = True
- print " * %s:" % cpv
- print
- for f in mycollisions:
- print " '%s'" % \
- os.path.join(destroot, f.lstrip(os.path.sep))
- print
- if not found_owner:
- print "None of the installed packages claim the above file(s)."
- print
- sys.exit(1)
- try:
- os.chdir(mycwd)
- except OSError:
- pass
-
- if os.stat(srcroot).st_dev == os.stat(destroot).st_dev:
- """ The merge process may move files out of the image directory,
- which causes invalidation of the .installed flag."""
- try:
- os.unlink(os.path.join(
- os.path.dirname(normalize_path(srcroot)), ".installed"))
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- del e
-
- # get old contents info for later unmerging
- oldcontents = self.getcontents()
-
- self.dbdir = self.dbtmpdir
- self.delete()
- if not os.path.exists(self.dbtmpdir):
- os.makedirs(self.dbtmpdir)
-
-		writemsg_stdout(">>> Merging %s to %s\n" % (self.mycpv, destroot))
-
- # run preinst script
- if myebuild is None:
- myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
- a = doebuild(myebuild, "preinst", destroot, self.settings, cleanup=cleanup,
- use_cache=0, tree=self.treetype, mydbapi=mydbapi,
- vartree=self.vartree)
-
- # XXX: Decide how to handle failures here.
- if a != os.EX_OK:
- writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
- return a
-
- # copy "info" files (like SLOT, CFLAGS, etc.) into the database
- for x in listdir(inforoot):
- self.copyfile(inforoot+"/"+x)
-
- # get current counter value (counter_tick also takes care of incrementing it)
- # XXX Need to make this destroot, but it needs to be initialized first. XXX
- # XXX bis: leads to some invalidentry() call through cp_all().
- counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
- # write local package counter for recording
- lcfile = open(self.dbtmpdir+"/COUNTER","w")
- lcfile.write(str(counter))
- lcfile.close()
-
- # open CONTENTS file (possibly overwriting old one) for recording
- outfile=open(self.dbtmpdir+"/CONTENTS","w")
-
- self.updateprotect()
-
- #if we have a file containing previously-merged config file md5sums, grab it.
- conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
- cfgfiledict = grabdict(conf_mem_file)
- if self.settings.has_key("NOCONFMEM"):
- cfgfiledict["IGNORE"]=1
- else:
- cfgfiledict["IGNORE"]=0
-
- # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
- mymtime = long(time.time())
- prevmask = os.umask(0)
- secondhand = []
-
- # we do a first merge; this will recurse through all files in our srcroot but also build up a
- # "second hand" of symlinks to merge later
- if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
- return 1
-
-		# now it's time to deal with our second hand; we'll loop until we can't merge any more. The rest are
- # broken symlinks. We'll merge them too.
- lastlen=0
- while len(secondhand) and len(secondhand)!=lastlen:
- # clear the thirdhand. Anything from our second hand that
- # couldn't get merged will be added to thirdhand.
-
- thirdhand=[]
- self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
-
- #swap hands
- lastlen=len(secondhand)
-
- # our thirdhand now becomes our secondhand. It's ok to throw
- # away secondhand since thirdhand contains all the stuff that
- # couldn't be merged.
- secondhand = thirdhand
-
- if len(secondhand):
- # force merge of remaining symlinks (broken or circular; oh well)
- self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
-
- #restore umask
- os.umask(prevmask)
-
- #if we opened it, close it
- outfile.flush()
- outfile.close()
-
- if os.path.exists(self.dbpkgdir):
- writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
- self.dbdir = self.dbpkgdir
- self.unmerge(oldcontents, trimworld=0, ldpath_mtimes=prev_mtimes)
- self.dbdir = self.dbtmpdir
- writemsg_stdout(">>> Original instance of package unmerged safely.\n")
-
- # We hold both directory locks.
- self.dbdir = self.dbpkgdir
- self.delete()
- movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
- contents = self.getcontents()
-
- #write out our collection of md5sums
- if cfgfiledict.has_key("IGNORE"):
- del cfgfiledict["IGNORE"]
-
- my_private_path = os.path.join(destroot, PRIVATE_PATH)
- if not os.path.exists(my_private_path):
- os.makedirs(my_private_path)
- os.chown(my_private_path, os.getuid(), portage_gid)
- os.chmod(my_private_path, 02770)
-
- writedict(cfgfiledict, conf_mem_file)
- del conf_mem_file
-
- #do postinst script
- a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
- tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
-
- # XXX: Decide how to handle failures here.
- if a != os.EX_OK:
- writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
- return a
-
- downgrade = False
- for v in otherversions:
- if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
- downgrade = True
-
- #update environment settings, library paths. DO NOT change symlinks.
- env_update(makelinks=(not downgrade),
- target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
- contents=contents)
- #dircache may break autoclean because it remembers the -MERGING-pkg file
- global dircache
- if dircache.has_key(self.dbcatdir):
- del dircache[self.dbcatdir]
-		writemsg_stdout(">>> %s merged.\n" % self.mycpv)
-
- # Process ebuild logfiles
- elog_process(self.mycpv, self.settings)
- if "noclean" not in self.settings.features:
- doebuild(myebuild, "clean", destroot, self.settings,
- tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
- return os.EX_OK
-
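# Editor's note: a simplified sketch (not part of this commit) of the
# "confmem" logic treewalk() and mergeme() share above: the md5 of every
# merged CONFIG_PROTECTed file is remembered, so an identical update is not
# offered again unless the user set NOCONFMEM (cfgfiledict["IGNORE"] == 1).
def want_merge(cfgfiledict, realdest, newmd5):
	if newmd5 == cfgfiledict.get(realdest, [None])[0]:
		# Identical update already merged once; honor the IGNORE flag.
		return bool(cfgfiledict["IGNORE"])
	cfgfiledict[realdest] = [newmd5]  # remember this update
	return True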
- def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
- """
-
- This function handles actual merging of the package contents to the livefs.
- It also handles config protection.
-
- @param srcroot: Where are we copying files from (usually ${D})
- @type srcroot: String (Path)
- @param destroot: Typically ${ROOT}
- @type destroot: String (Path)
- @param outfile: File to log operations to
- @type outfile: File Object
-		@param secondhand: A set of items to merge in pass two (usually
-		symlinks that point to non-existent files that may get merged later)
- @type secondhand: List
-		@param stufftomerge: Either a directory to merge, or a list of items.
- @type stufftomerge: String or List
-		@param cfgfiledict: { File:md5 } mapping for CONFIG_PROTECTed files
- @type cfgfiledict: Dictionary
-		@param thismtime: The current time (typically long(time.time()))
- @type thismtime: Long
- @rtype: None or Boolean
- @returns:
- 1. True on failure
- 2. None otherwise
-
- """
- from os.path import sep, join
- srcroot = normalize_path(srcroot).rstrip(sep) + sep
- destroot = normalize_path(destroot).rstrip(sep) + sep
-		# this merges a list of files. There are two forms of argument passing.
- if type(stufftomerge)==types.StringType:
- #A directory is specified. Figure out protection paths, listdir() it and process it.
- mergelist = listdir(join(srcroot, stufftomerge))
- offset=stufftomerge
- else:
- mergelist=stufftomerge
- offset=""
- for x in mergelist:
- mysrc = join(srcroot, offset, x)
- mydest = join(destroot, offset, x)
- # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
- myrealdest = join(sep, offset, x)
- # stat file once, test using S_* macros many times (faster that way)
- try:
- mystat=os.lstat(mysrc)
- except OSError, e:
- writemsg("\n")
- writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
- writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
- writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
- writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
- writemsg(red("!!! File: ")+str(mysrc)+"\n", noiselevel=-1)
- writemsg(red("!!! Error: ")+str(e)+"\n", noiselevel=-1)
- sys.exit(1)
- except Exception, e:
- writemsg("\n")
- writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
-				writemsg(red("!!! A stat call returned the following error for the following file:\n"))
- writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
- writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
- writemsg( "!!! File: "+str(mysrc)+"\n", noiselevel=-1)
- writemsg( "!!! Error: "+str(e)+"\n", noiselevel=-1)
- sys.exit(1)
-
-
- mymode=mystat[stat.ST_MODE]
- # handy variables; mydest is the target object on the live filesystems;
- # mysrc is the source object in the temporary install dir
- try:
- mydmode = os.lstat(mydest).st_mode
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- del e
- #dest file doesn't exist
- mydmode=None
-
- if stat.S_ISLNK(mymode):
- # we are merging a symbolic link
- myabsto=abssymlink(mysrc)
- if myabsto.startswith(srcroot):
- myabsto=myabsto[len(srcroot):]
- myabsto = myabsto.lstrip(sep)
- myto=os.readlink(mysrc)
- if self.settings and self.settings["D"]:
- if myto.startswith(self.settings["D"]):
- myto=myto[len(self.settings["D"]):]
- # myrealto contains the path of the real file to which this symlink points.
- # we can simply test for existence of this file to see if the target has been merged yet
- myrealto = normalize_path(os.path.join(destroot, myabsto))
- if mydmode!=None:
- #destination exists
- if not stat.S_ISLNK(mydmode):
- if stat.S_ISDIR(mydmode):
- # directory in the way: we can't merge a symlink over a directory
- # we won't merge this, continue with next file...
- continue
-
- if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
- # Kill file blocking installation of symlink to dir #71787
- pass
- elif self.isprotected(mydest):
- # Use md5 of the target in ${D} if it exists...
- try:
- newmd5 = portage.checksum.perform_md5(
- join(srcroot, myabsto))
- except portage.exception.FileNotFound:
- # Maybe the target is merged already.
- try:
- newmd5 = portage.checksum.perform_md5(
- myrealto)
- except portage.exception.FileNotFound:
- newmd5 = None
- mydest = new_protect_filename(mydest,newmd5=newmd5)
-
- # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
- if (secondhand!=None) and (not os.path.exists(myrealto)):
- # either the target directory doesn't exist yet or the target file doesn't exist -- or
- # the target is a broken symlink. We will add this file to our "second hand" and merge
- # it later.
- secondhand.append(mysrc[len(srcroot):])
- continue
- # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
- mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
- if mymtime!=None:
- writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
- outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
- else:
- print "!!! Failed to move file."
- print "!!!",mydest,"->",myto
- sys.exit(1)
- elif stat.S_ISDIR(mymode):
- # we are merging a directory
- if mydmode!=None:
- # destination exists
-
- if bsd_chflags:
- # Save then clear flags on dest.
- dflags = os.lstat(mydest).st_flags
- if dflags != 0:
- bsd_chflags.lchflags(mydest, 0)
-
- if not os.access(mydest, os.W_OK):
- pkgstuff = pkgsplit(self.pkg)
- writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
- writemsg("!!! Please check permissions and directories for broken symlinks.\n")
- writemsg("!!! You may start the merge process again by using ebuild:\n")
- writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
- writemsg("!!! And finish by running this: env-update\n\n")
- return 1
-
- if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
- # a symlink to an existing directory will work for us; keep it:
- writemsg_stdout("--- %s/\n" % mydest)
- if bsd_chflags:
- bsd_chflags.lchflags(mydest, dflags)
- else:
- # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
- if movefile(mydest,mydest+".backup", mysettings=self.settings) is None:
- sys.exit(1)
- print "bak",mydest,mydest+".backup"
- #now create our directory
- if self.settings.selinux_enabled():
- sid = selinux.get_sid(mysrc)
- selinux.secure_mkdir(mydest,sid)
- else:
- os.mkdir(mydest)
- if bsd_chflags:
- bsd_chflags.lchflags(mydest, dflags)
- os.chmod(mydest,mystat[0])
- os.chown(mydest,mystat[4],mystat[5])
- writemsg_stdout(">>> %s/\n" % mydest)
- else:
- #destination doesn't exist
- if self.settings.selinux_enabled():
- sid = selinux.get_sid(mysrc)
- selinux.secure_mkdir(mydest,sid)
- else:
- os.mkdir(mydest)
- os.chmod(mydest,mystat[0])
- os.chown(mydest,mystat[4],mystat[5])
- writemsg_stdout(">>> %s/\n" % mydest)
- outfile.write("dir "+myrealdest+"\n")
- # recurse and merge this directory
- if self.mergeme(srcroot, destroot, outfile, secondhand,
- join(offset, x), cfgfiledict, thismtime):
- return 1
- elif stat.S_ISREG(mymode):
- # we are merging a regular file
- mymd5=portage.checksum.perform_md5(mysrc,calc_prelink=1)
- # calculate config file protection stuff
- mydestdir=os.path.dirname(mydest)
- moveme=1
- zing="!!!"
- if mydmode!=None:
- # destination file exists
- if stat.S_ISDIR(mydmode):
- # install of destination is blocked by an existing directory with the same name
- moveme=0
- writemsg_stdout("!!! %s\n" % mydest)
- elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
- cfgprot=0
- # install of destination is blocked by an existing regular file,
- # or by a symlink to an existing regular file;
- # now, config file management may come into play.
- # we only need to tweak mydest if cfg file management is in play.
- if self.isprotected(mydest):
- # we have a protection path; enable config file management.
- destmd5=portage.checksum.perform_md5(mydest,calc_prelink=1)
- if mymd5==destmd5:
- #file already in place; simply update mtimes of destination
- os.utime(mydest,(thismtime,thismtime))
- zing="---"
- moveme=0
- else:
- if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
- """ An identical update has previously been
- merged. Skip it unless the user has chosen
- --noconfmem."""
- zing = "-o-"
- moveme = cfgfiledict["IGNORE"]
- cfgprot = cfgfiledict["IGNORE"]
- else:
- moveme = 1
- cfgprot = 1
- if moveme:
- # Merging a new file, so update confmem.
- cfgfiledict[myrealdest] = [mymd5]
- elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
- """A previously remembered update has been
- accepted, so it is removed from confmem."""
- del cfgfiledict[myrealdest]
- if cfgprot:
- mydest = new_protect_filename(mydest, newmd5=mymd5)
-
- # whether config protection or not, we merge the new file the
- # same way. Unless moveme=0 (blocking directory)
- if moveme:
- mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
- if mymtime is None:
- sys.exit(1)
- zing=">>>"
- else:
- mymtime=thismtime
- # We need to touch the destination so that on --update the
- # old package won't yank the file with it. (non-cfgprot related)
- os.utime(mydest,(thismtime,thismtime))
- zing="---"
- if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
-
- # XXX kludge, can be killed when portage stops relying on
- # md5+mtime, and uses refcounts
- # alright, we've fooled w/ mtime on the file; this pisses off static archives
- # basically internal mtime != file's mtime, so the linker (falsely) thinks
-					# the archive is stale, and needs to have its toc rebuilt.
-
- myf = open(mydest, "r+")
-
- # ar mtime field is digits padded with spaces, 12 bytes.
- lms=str(thismtime+5).ljust(12)
- myf.seek(0)
- magic=myf.read(8)
- if magic != "!<arch>\n":
-						# not an archive (dolib.a from portage.py ends up here, for example)
- myf.close()
- else:
- st = os.stat(mydest)
- while myf.tell() < st.st_size - 12:
- # skip object name
- myf.seek(16,1)
-
- # update mtime
- myf.write(lms)
-
- # skip uid/gid/mperm
- myf.seek(20,1)
-
- # read the archive member's size
- x=long(myf.read(10))
-
- # skip the trailing newlines, and add the potential
- # extra padding byte if it's not an even size
- myf.seek(x + 2 + (x % 2),1)
-
- # and now we're at the end. yay.
- myf.close()
- mymd5 = portage.checksum.perform_md5(mydest, calc_prelink=1)
- os.utime(mydest,(thismtime,thismtime))
-
- if mymtime!=None:
- zing=">>>"
- outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
- writemsg_stdout("%s %s\n" % (zing,mydest))
- else:
- # we are merging a fifo or device node
- zing="!!!"
- if mydmode is None:
- # destination doesn't exist
- if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
- zing=">>>"
- else:
- sys.exit(1)
- if stat.S_ISFIFO(mymode):
- outfile.write("fif %s\n" % myrealdest)
- else:
- outfile.write("dev %s\n" % myrealdest)
- writemsg_stdout(zing+" "+mydest+"\n")
-
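# Editor's note: a condensed sketch (not part of this commit) of the
# "second hand" fixpoint loop that treewalk() drives through mergeme()
# above: symlinks whose targets do not exist yet are deferred and retried
# until a pass makes no progress, then force-merged (broken or circular
# links included). "merge_pass" stands in for a mergeme()-style callable.
def merge_deferred(merge_pass, secondhand):
	lastlen = 0
	while secondhand and len(secondhand) != lastlen:
		thirdhand = []
		merge_pass(secondhand, thirdhand)  # still-unmergeable links land here
		lastlen = len(secondhand)
		secondhand = thirdhand
	if secondhand:
		merge_pass(secondhand, None)  # None disables further deferral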
- def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
- mydbapi=None, prev_mtimes=None):
- try:
- self.lockdb()
- return self.treewalk(mergeroot, myroot, inforoot, myebuild,
- cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
- finally:
- self.unlockdb()
-
- def getstring(self,name):
- "returns contents of a file with whitespace converted to spaces"
- if not os.path.exists(self.dbdir+"/"+name):
- return ""
- myfile=open(self.dbdir+"/"+name,"r")
- mydata=myfile.read().split()
- myfile.close()
- return " ".join(mydata)
-
- def copyfile(self,fname):
- shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
-
- def getfile(self,fname):
- if not os.path.exists(self.dbdir+"/"+fname):
- return ""
- myfile=open(self.dbdir+"/"+fname,"r")
- mydata=myfile.read()
- myfile.close()
- return mydata
-
- def setfile(self,fname,data):
- write_atomic(os.path.join(self.dbdir, fname), data)
-
- def getelements(self,ename):
- if not os.path.exists(self.dbdir+"/"+ename):
- return []
- myelement=open(self.dbdir+"/"+ename,"r")
- mylines=myelement.readlines()
- myreturn=[]
- for x in mylines:
- for y in x[:-1].split():
- myreturn.append(y)
- myelement.close()
- return myreturn
-
- def setelements(self,mylist,ename):
- myelement=open(self.dbdir+"/"+ename,"w")
- for x in mylist:
- myelement.write(x+"\n")
- myelement.close()
-
- def isregular(self):
- "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
- return os.path.exists(self.dbdir+"/CATEGORY")
+from portage.dbapi import dbapi
+from portage.dbapi.virtual import fakedbapi
+from portage.dbapi.bintree import bindbapi, binarytree
+from portage.dbapi.vartree import vardbapi, vartree, dblink
+from portage.dbapi.porttree import close_portdbapi_caches, portdbapi, portagetree
class FetchlistDict(UserDict.DictMixin):
"""This provide a mapping interface to retrieve fetch lists. It's used
diff --git a/pym/portage/dbapi/__init__.py b/pym/portage/dbapi/__init__.py
new file mode 100644
index 000000000..c6067fd49
--- /dev/null
+++ b/pym/portage/dbapi/__init__.py
@@ -0,0 +1,53 @@
+from portage import dep_expand, dep_getkey, match_from_list, writemsg
+from portage.dep import dep_getslot
+from portage.locks import unlockfile
+from portage.output import red
+
+import os, re
+
+class dbapi(object):
+ def __init__(self):
+ pass
+
+ def close_caches(self):
+ pass
+
+ def cp_list(self, cp, use_cache=1):
+ return
+
+ def cpv_all(self):
+ cpv_list = []
+ for cp in self.cp_all():
+ cpv_list.extend(self.cp_list(cp))
+ return cpv_list
+
+ def aux_get(self, mycpv, mylist):
+ "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
+ 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
+ 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
+ raise NotImplementedError
+
+ def match(self, origdep, use_cache=1):
+ mydep = dep_expand(origdep, mydb=self, settings=self.settings)
+ mykey = dep_getkey(mydep)
+ mylist = match_from_list(mydep, self.cp_list(mykey, use_cache=use_cache))
+ myslot = dep_getslot(mydep)
+ if myslot is not None:
+ mylist = [cpv for cpv in mylist \
+ if self.aux_get(cpv, ["SLOT"])[0] == myslot]
+ return mylist
+
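# Editor's note: a hedged usage illustration (hypothetical atom, not part of
# this commit) of the generic match() above, where "mydb" is any concrete
# dbapi subclass. A slot suffix on the atom restricts matches via
# aux_get("SLOT"):
#   mydb.match(">=sys-apps/foo-1.0:0")
# is equivalent to filtering an unslotted match by SLOT:
#   [cpv for cpv in mydb.match(">=sys-apps/foo-1.0")
#       if mydb.aux_get(cpv, ["SLOT"])[0] == "0"]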
+ def invalidentry(self, mypath):
+ if re.search("portage_lockfile$", mypath):
+ if not os.environ.has_key("PORTAGE_MASTER_PID"):
+ writemsg("Lockfile removed: %s\n" % mypath, 1)
+ unlockfile((mypath, None, None))
+ else:
+ # Nothing we can do about it. We're probably sandboxed.
+ pass
+ elif re.search(".*/-MERGING-(.*)", mypath):
+ if os.path.exists(mypath):
+ writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n", noiselevel=-1)
+ else:
+ writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
+
diff --git a/pym/portage/dbapi/bintree.py b/pym/portage/dbapi/bintree.py
new file mode 100644
index 000000000..a871dde8e
--- /dev/null
+++ b/pym/portage/dbapi/bintree.py
@@ -0,0 +1,520 @@
+from portage.dbapi.virtual import fakedbapi
+from portage.exception import InvalidPackageName, InvalidAtom
+from portage.output import green
+
+from portage import normalize_path, catpkgsplit, writemsg, writemsg_stdout, \
+	update_dbentries, catsplit, isvalidatom, isjustname, listdir, \
+ dep_getkey, dep_expand, match_from_list, best
+
+import portage.xpak, portage.getbinpkg
+
+import os, errno, stat
+
+class bindbapi(fakedbapi):
+ def __init__(self, mybintree=None, settings=None):
+ self.bintree = mybintree
+ self.cpvdict={}
+ self.cpdict={}
+ if settings is None:
+ from portage import settings
+ self.settings = settings
+ self._match_cache = {}
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(["SLOT"])
+ self._aux_cache = {}
+
+ def match(self, *pargs, **kwargs):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.match(self, *pargs, **kwargs)
+
+ def aux_get(self, mycpv, wants):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ cache_me = False
+ if not set(wants).difference(self._aux_cache_keys):
+ aux_cache = self._aux_cache.get(mycpv)
+ if aux_cache is not None:
+ return [aux_cache[x] for x in wants]
+ cache_me = True
+ mysplit = mycpv.split("/")
+ mylist = []
+ tbz2name = mysplit[1]+".tbz2"
+ if self.bintree and not self.bintree.isremote(mycpv):
+ tbz2 = portage.xpak.tbz2(self.bintree.getname(mycpv))
+ getitem = tbz2.getfile
+ else:
+ getitem = self.bintree.remotepkgs[tbz2name].get
+ mydata = {}
+ mykeys = wants
+ if cache_me:
+ mykeys = self._aux_cache_keys.union(wants)
+ for x in mykeys:
+ myval = getitem(x)
+ # myval is None if the key doesn't exist
+ # or the tbz2 is corrupt.
+ if myval:
+ mydata[x] = " ".join(myval.split())
+ if "EAPI" in mykeys:
+ if not mydata.setdefault("EAPI", "0"):
+ mydata["EAPI"] = "0"
+ if cache_me:
+ aux_cache = {}
+ for x in self._aux_cache_keys:
+ aux_cache[x] = mydata.get(x, "")
+ self._aux_cache[mycpv] = aux_cache
+ return [mydata.get(x, "") for x in wants]
+
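# Editor's note: a generic sketch (not part of this commit) of the selective
# metadata caching aux_get() performs above: only cheap, frequently needed
# keys (here just SLOT) are held in memory so repeated dep matching avoids
# re-reading the xpak segment of each tbz2; anything else falls through to
# the slow path. "slow_get" stands in for the real extraction code.
class SelectiveAuxCache(object):
	def __init__(self, slow_get, cached_keys=("SLOT",)):
		self._slow_get = slow_get  # callable: (cpv, keys) -> list of values
		self._cached_keys = frozenset(cached_keys)
		self._cache = {}

	def aux_get(self, cpv, wants):
		if not set(wants).difference(self._cached_keys):
			entry = self._cache.get(cpv)
			if entry is not None:
				return [entry[k] for k in wants]
		# Fetch the union so the cache can be populated as a side effect.
		fetch = list(self._cached_keys.union(wants))
		values = dict(zip(fetch, self._slow_get(cpv, fetch)))
		self._cache[cpv] = dict((k, values[k]) for k in self._cached_keys)
		return [values[k] for k in wants]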
+ def aux_update(self, cpv, values):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ tbz2path = self.bintree.getname(cpv)
+ if not os.path.exists(tbz2path):
+ raise KeyError(cpv)
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+ mydata.update(values)
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+
+ def cp_list(self, *pargs, **kwargs):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_list(self, *pargs, **kwargs)
+
+ def cpv_all(self):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_all(self)
+
+
+class binarytree(object):
+ "this tree scans for a list of all packages available in PKGDIR"
+ def __init__(self, root, pkgdir, virtual=None, settings=None, clone=None):
+ if clone:
+ # XXX This isn't cloning. It's an instance of the same thing.
+ self.root = clone.root
+ self.pkgdir = clone.pkgdir
+ self.dbapi = clone.dbapi
+ self.populated = clone.populated
+ self.tree = clone.tree
+ self.remotepkgs = clone.remotepkgs
+ self.invalids = clone.invalids
+ self.settings = clone.settings
+ else:
+ self.root = root
+ #self.pkgdir=settings["PKGDIR"]
+ self.pkgdir = normalize_path(pkgdir)
+ self.dbapi = bindbapi(self, settings=settings)
+ self.populated = 0
+ self.tree = {}
+ self.remotepkgs = {}
+ self.invalids = []
+ self.settings = settings
+ self._pkg_paths = {}
+
+ def move_ent(self, mylist):
+ if not self.populated:
+ self.populate()
+ origcp = mylist[1]
+ newcp = mylist[2]
+ # sanity check
+ for cp in [origcp, newcp]:
+ if not (isvalidatom(cp) and isjustname(cp)):
+ raise InvalidPackageName(cp)
+ origcat = origcp.split("/")[0]
+ mynewcat = newcp.split("/")[0]
+ origmatches=self.dbapi.cp_list(origcp)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+
+ mycpsplit = catpkgsplit(mycpv)
+ mynewcpv = newcp + "-" + mycpsplit[2]
+ if mycpsplit[3] != "r0":
+ mynewcpv += "-" + mycpsplit[3]
+ myoldpkg = mycpv.split("/")[1]
+ mynewpkg = mynewcpv.split("/")[1]
+
+ if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
+ writemsg("!!! Cannot update binary: Destination exists.\n",
+ noiselevel=-1)
+ writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
+ continue
+
+ tbz2path = self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
+ noiselevel=-1)
+ continue
+
+ #print ">>> Updating data in:",mycpv
+ writemsg_stdout("%")
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+ updated_items = update_dbentries([mylist], mydata)
+ mydata.update(updated_items)
+ mydata["CATEGORY"] = mynewcat+"\n"
+ if mynewpkg != myoldpkg:
+ mydata[mynewpkg+".ebuild"] = mydata[myoldpkg+".ebuild"]
+ del mydata[myoldpkg+".ebuild"]
+ mydata["PF"] = mynewpkg + "\n"
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+
+ self.dbapi.cpv_remove(mycpv)
+ del self._pkg_paths[mycpv]
+ new_path = self.getname(mynewcpv)
+ self._pkg_paths[mynewcpv] = os.path.join(
+ *new_path.split(os.path.sep)[-2:])
+			if new_path != tbz2path:
+ try:
+ os.makedirs(os.path.dirname(new_path))
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ del e
+ os.rename(tbz2path, new_path)
+ self._remove_symlink(mycpv)
+ if new_path.split(os.path.sep)[-2] == "All":
+ self._create_symlink(mynewcpv)
+ self.dbapi.cpv_inject(mynewcpv)
+
+ return 1
+
+ def _remove_symlink(self, cpv):
+ """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
+ the ${PKGDIR}/${CATEGORY} directory if empty. The file will not be
+ removed if os.path.islink() returns False."""
+ mycat, mypkg = catsplit(cpv)
+ mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+ if os.path.islink(mylink):
+ """Only remove it if it's really a link so that this method never
+ removes a real package that was placed here to avoid a collision."""
+ os.unlink(mylink)
+ try:
+ os.rmdir(os.path.join(self.pkgdir, mycat))
+ except OSError, e:
+ if e.errno not in (errno.ENOENT,
+ errno.ENOTEMPTY, errno.EEXIST):
+ raise
+ del e
+
+ def _create_symlink(self, cpv):
+ """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
+ ${PKGDIR}/${CATEGORY} directory, if necessary). Any file that may
+ exist in the location of the symlink will first be removed."""
+ mycat, mypkg = catsplit(cpv)
+ full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+ try:
+ os.makedirs(os.path.dirname(full_path))
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ del e
+ try:
+ os.unlink(full_path)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
+
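# Editor's note: the on-disk layout (hypothetical package name, not part of
# this commit) that _create_symlink() and _remove_symlink() maintain above:
#   ${PKGDIR}/All/foo-1.0.tbz2                        <- the real file
#   ${PKGDIR}/app-misc/foo-1.0.tbz2 -> ../All/foo-1.0.tbz2
# The category entry is only ever a symlink, so _remove_symlink() can safely
# refuse to unlink anything that os.path.islink() rejects.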
+ def move_slot_ent(self, mylist):
+ if not self.populated:
+ self.populate()
+ pkg = mylist[1]
+ origslot = mylist[2]
+ newslot = mylist[3]
+
+ if not isvalidatom(pkg):
+ raise InvalidAtom(pkg)
+
+ origmatches = self.dbapi.match(pkg)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ mycpsplit = catpkgsplit(mycpv)
+ myoldpkg = mycpv.split("/")[1]
+ tbz2path = self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
+ noiselevel=-1)
+ continue
+
+ #print ">>> Updating data in:",mycpv
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+
+ slot = mydata["SLOT"]
+ if (not slot):
+ continue
+
+ if (slot[0] != origslot):
+ continue
+
+ writemsg_stdout("S")
+ mydata["SLOT"] = newslot+"\n"
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+ return 1
+
+ def update_ents(self, update_iter):
+ if len(update_iter) == 0:
+ return
+ if not self.populated:
+ self.populate()
+
+ for mycpv in self.dbapi.cp_all():
+ tbz2path = self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
+ noiselevel=-1)
+ continue
+ #print ">>> Updating binary data:",mycpv
+ writemsg_stdout("*")
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+ updated_items = update_dbentries(update_iter, mydata)
+ if len(updated_items) > 0:
+ mydata.update(updated_items)
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+ return 1
+
+ def prevent_collision(self, cpv):
+ """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
+ use for a given cpv. If a collision will occur with an existing
+ package from another category, the existing package will be bumped to
+ ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
+ full_path = self.getname(cpv)
+ if "All" == full_path.split(os.path.sep)[-2]:
+ return
+ """Move a colliding package if it exists. Code below this point only
+ executes in rare cases."""
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ mypath = os.path.join("All", myfile)
+ dest_path = os.path.join(self.pkgdir, mypath)
+ if os.path.exists(dest_path):
+ # For invalid packages, other_cat could be None.
+ other_cat = portage.xpak.tbz2(dest_path).getfile("CATEGORY")
+ if other_cat:
+ other_cat = other_cat.strip()
+ self._move_from_all(other_cat + "/" + mypkg)
+ """The file may or may not exist. Move it if necessary and update
+ internal state for future calls to getname()."""
+ self._move_to_all(cpv)
+
+ def _move_to_all(self, cpv):
+ """If the file exists, move it. Whether or not it exists, update state
+ for future getname() calls."""
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ src_path = os.path.join(self.pkgdir, mycat, myfile)
+ try:
+ mystat = os.lstat(src_path)
+ except OSError, e:
+ mystat = None
+ if mystat and stat.S_ISREG(mystat.st_mode):
+ try:
+ os.makedirs(os.path.join(self.pkgdir, "All"))
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ del e
+ os.rename(src_path, os.path.join(self.pkgdir, "All", myfile))
+ self._create_symlink(cpv)
+ self._pkg_paths[cpv] = os.path.join("All", myfile)
+
+ def _move_from_all(self, cpv):
+ """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
+ ${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state from getname calls."""
+ self._remove_symlink(cpv)
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ mypath = os.path.join(mycat, myfile)
+ dest_path = os.path.join(self.pkgdir, mypath)
+ try:
+ os.makedirs(os.path.dirname(dest_path))
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ del e
+ os.rename(os.path.join(self.pkgdir, "All", myfile), dest_path)
+ self._pkg_paths[cpv] = mypath
+
+ def populate(self, getbinpkgs=0, getbinpkgsonly=0):
+ "populates the binarytree"
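+		# Three phases: scan ${PKGDIR}/All plus the category dirs for local
+		# *.tbz2 files (skipping symlinks), warn if binhost use was requested
+		# without PORTAGE_BINHOST, then fetch remote metadata via
+		# portage.getbinpkg.dir_get_metadata() and inject those cpvs too.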
+ if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
+ return 0
+ if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
+ return 0
+
+ if not getbinpkgsonly:
+ pkg_paths = {}
+ dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
+ if "All" in dirs:
+ dirs.remove("All")
+ dirs.sort()
+ dirs.insert(0, "All")
+ for mydir in dirs:
+ for myfile in listdir(os.path.join(self.pkgdir, mydir)):
+ if not myfile.endswith(".tbz2"):
+ continue
+ mypath = os.path.join(mydir, myfile)
+ full_path = os.path.join(self.pkgdir, mypath)
+ if os.path.islink(full_path):
+ continue
+ mytbz2 = portage.xpak.tbz2(full_path)
+ # For invalid packages, mycat could be None.
+ mycat = mytbz2.getfile("CATEGORY")
+ mypf = mytbz2.getfile("PF")
+ mypkg = myfile[:-5]
+ if not mycat or not mypf:
+ #old-style or corrupt package
+ writemsg("!!! Invalid binary package: '%s'\n" % full_path,
+ noiselevel=-1)
+ writemsg("!!! This binary package is not " + \
+ "recoverable and should be deleted.\n",
+ noiselevel=-1)
+ self.invalids.append(mypkg)
+ continue
+ mycat = mycat.strip()
+ if mycat != mydir and mydir != "All":
+ continue
+ if mypkg != mypf.strip():
+ continue
+ mycpv = mycat + "/" + mypkg
+ if mycpv in pkg_paths:
+ # All is first, so it's preferred.
+ continue
+ pkg_paths[mycpv] = mypath
+ self.dbapi.cpv_inject(mycpv)
+ self._pkg_paths = pkg_paths
+
+ if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
+ writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
+ noiselevel=-1)
+
+ if getbinpkgs and \
+ self.settings["PORTAGE_BINHOST"] and not self.remotepkgs:
+ try:
+ chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
+ if chunk_size < 8:
+ chunk_size = 8
+ except (ValueError, KeyError):
+ chunk_size = 3000
+
+ writemsg(green("Fetching binary packages info...\n"))
+ self.remotepkgs = portage.getbinpkg.dir_get_metadata(
+ self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
+ writemsg(green(" -- DONE!\n\n"))
+
+ for mypkg in self.remotepkgs.keys():
+ if not self.remotepkgs[mypkg].has_key("CATEGORY"):
+ #old-style or corrupt package
+ writemsg("!!! Invalid remote binary package: "+mypkg+"\n",
+ noiselevel=-1)
+ del self.remotepkgs[mypkg]
+ continue
+ mycat = self.remotepkgs[mypkg]["CATEGORY"].strip()
+ fullpkg = mycat+"/"+mypkg[:-5]
+ mykey = dep_getkey(fullpkg)
+ try:
+ # invalid tbz2's can hurt things.
+ #print "cpv_inject("+str(fullpkg)+")"
+ self.dbapi.cpv_inject(fullpkg)
+ #print " -- Injected"
+ except SystemExit, e:
+ raise
+ except:
+					writemsg("!!! Failed to inject remote binary package: "+str(fullpkg)+"\n",
+ noiselevel=-1)
+ del self.remotepkgs[mypkg]
+ continue
+ self.populated=1
+
+ def inject(self, cpv):
+ return self.dbapi.cpv_inject(cpv)
+
+ def exists_specific(self, cpv):
+ if not self.populated:
+ self.populate()
+ return self.dbapi.match(
+ dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
+
+ def dep_bestmatch(self, mydep):
+ "compatibility method -- all matches, not just visible ones"
+ if not self.populated:
+ self.populate()
+ writemsg("\n\n", 1)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mykey = dep_getkey(mydep)
+ writemsg("mykey: %s\n" % mykey, 1)
+ mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
+ writemsg("mymatch: %s\n" % mymatch, 1)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def getname(self, pkgname):
+ """Returns a file location for this package. The default location is
+ ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
+ in the rare event of a collision. The prevent_collision() method can
+ be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
+ specific cpv."""
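+		# e.g. (hypothetical, with PKGDIR=/usr/portage/packages):
+		#   getname("app-editors/vim-7.0")
+		#   -> "/usr/portage/packages/All/vim-7.0.tbz2"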
+ if not self.populated:
+ self.populate()
+ mycpv = pkgname
+ mypath = self._pkg_paths.get(mycpv, None)
+ if mypath:
+ return os.path.join(self.pkgdir, mypath)
+ mycat, mypkg = catsplit(mycpv)
+ mypath = os.path.join("All", mypkg + ".tbz2")
+ if mypath in self._pkg_paths.values():
+ mypath = os.path.join(mycat, mypkg + ".tbz2")
+ self._pkg_paths[mycpv] = mypath # cache for future lookups
+ return os.path.join(self.pkgdir, mypath)
+
+ def isremote(self, pkgname):
+ "Returns true if the package is kept remotely."
+ mysplit = pkgname.split("/")
+ remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
+ return remote
+
+ def get_use(self, pkgname):
+ mysplit=pkgname.split("/")
+ if self.isremote(pkgname):
+ return self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:].split()
+ tbz2=portage.xpak.tbz2(self.getname(pkgname))
+ return tbz2.getfile("USE").split()
+
+ def gettbz2(self, pkgname):
+ "fetches the package from a remote site, if necessary."
+ print "Fetching '"+str(pkgname)+"'"
+ mysplit = pkgname.split("/")
+ tbz2name = mysplit[1]+".tbz2"
+ if not self.isremote(pkgname):
+ if (tbz2name not in self.invalids):
+ return
+ else:
+ writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n",
+ noiselevel=-1)
+ mydest = self.pkgdir + "/All/"
+ try:
+ os.makedirs(mydest, 0775)
+ except (OSError, IOError):
+ pass
+ return portage.getbinpkg.file_get(
+ self.settings["PORTAGE_BINHOST"] + "/" + tbz2name,
+ mydest, fcmd=self.settings["RESUMECOMMAND"])
+
+ def getslot(self, mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot = self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+ return myslot
diff --git a/pym/portage/dbapi/porttree.py b/pym/portage/dbapi/porttree.py
new file mode 100644
index 000000000..edb5271d8
--- /dev/null
+++ b/pym/portage/dbapi/porttree.py
@@ -0,0 +1,781 @@
+from portage.dbapi import dbapi
+from portage.data import portage_gid, secpass
+from portage.util import ensure_dirs, writemsg
+from portage.exception import OperationNotPermitted, PortageException, \
+ UntrustedSignature, SecurityViolation, InvalidSignature, MissingSignature, \
+ FileNotFound, CacheError
+from portage.output import red
+from portage.manifest import Manifest
+from portage.dep import use_reduce, paren_reduce, dep_getslot
+
+import portage.gpg, portage.checksum
+
+from portage import eclass_cache, auxdbkeys, auxdbkeylen, \
+	apply_recursive_permissions, pkgsplit, doebuild, flatten, listdir, \
+	dep_expand, dep_getkey, catpkgsplit, match_from_list, match_to_list, \
+	eapi_is_supported, key_expand, dep_check, config, best
+from portage.const import REPO_NAME_LOC
+
+import os, stat, sys
+
+
+class portdbapi(dbapi):
+ """this tree will scan a portage directory located at root (passed to init)"""
+ portdbapi_instances = []
+
+ def __init__(self, porttree_root, mysettings=None):
+ portdbapi.portdbapi_instances.append(self)
+
+ if mysettings:
+ self.mysettings = mysettings
+ else:
+ from portage import settings
+ self.mysettings = config(clone=settings)
+
+ # This is strictly for use in aux_get() doebuild calls when metadata
+ # is generated by the depend phase. It's safest to use a clone for
+ # this purpose because doebuild makes many changes to the config
+ # instance that is passed in.
+ self.doebuild_settings = config(clone=self.mysettings)
+
+ self.manifestVerifyLevel = None
+ self.manifestVerifier = None
+ self.manifestCache = {} # {location: [stat, md5]}
+ self.manifestMissingCache = []
+
+ if "gpg" in self.mysettings.features:
+ self.manifestVerifyLevel = portage.gpg.EXISTS
+ if "strict" in self.mysettings.features:
+ self.manifestVerifyLevel = portage.gpg.MARGINAL
+ self.manifestVerifier = portage.gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+ elif "severe" in self.mysettings.features:
+ self.manifestVerifyLevel = portage.gpg.TRUSTED
+ self.manifestVerifier = portage.gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
+ else:
+ self.manifestVerifier = portage.gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+
+ #self.root=settings["PORTDIR"]
+ self.porttree_root = os.path.realpath(porttree_root)
+
+ self.depcachedir = self.mysettings.depcachedir[:]
+
+ self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
+ if self.tmpfs and not os.path.exists(self.tmpfs):
+ self.tmpfs = None
+ if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
+ self.tmpfs = None
+ if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
+ self.tmpfs = None
+
+ self.eclassdb = eclass_cache.cache(self.porttree_root,
+ overlays=self.mysettings["PORTDIR_OVERLAY"].split())
+
+ self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
+
+ #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
+ self.xcache = {}
+ self.frozen = 0
+
+ self.porttrees = [self.porttree_root] + \
+ [os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
+ self.treemap = {}
+ for path in self.porttrees:
+ repo_name_path = os.path.join(path, REPO_NAME_LOC)
+ try:
+ repo_name = open(repo_name_path, 'r').readline().strip()
+ self.treemap[repo_name] = path
+ except (OSError,IOError):
+ pass
+
+ self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
+ self.auxdb = {}
+ self._init_cache_dirs()
+ # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
+ # ~harring
+ filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
+ if secpass < 1:
+ from portage.cache import metadata_overlay, volatile
+ for x in self.porttrees:
+ db_ro = self.auxdbmodule(self.depcachedir, x,
+ filtered_auxdbkeys, gid=portage_gid, readonly=True)
+ self.auxdb[x] = metadata_overlay.database(
+ self.depcachedir, x, filtered_auxdbkeys,
+ gid=portage_gid, db_rw=volatile.database,
+ db_ro=db_ro)
+ else:
+ for x in self.porttrees:
+ # location, label, auxdbkeys
+ self.auxdb[x] = self.auxdbmodule(
+ self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(["EAPI", "KEYWORDS", "SLOT"])
+ self._aux_cache = {}
+
+ def _init_cache_dirs(self):
+ """Create /var/cache/edb/dep and adjust permissions for the portage
+ group."""
+
+ dirmode = 02070
+ filemode = 060
+ modemask = 02
+
+ try:
+ for mydir in (self.depcachedir,):
+ if ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
+ writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
+ noiselevel=-1)
+ def onerror(e):
+ raise # bail out on the first error that occurs during recursion
+ if not apply_recursive_permissions(mydir,
+ gid=portage_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=onerror):
+ raise OperationNotPermitted(
+ "Failed to apply recursive permissions for the portage group.")
+ except PortageException, e:
+ pass
+
+ def close_caches(self):
+ for x in self.auxdb.keys():
+ self.auxdb[x].sync()
+ self.auxdb.clear()
+
+ def flush_cache(self):
+ for x in self.auxdb.values():
+ x.sync()
+
+ def finddigest(self,mycpv):
+ try:
+ mydig = self.findname2(mycpv)[0]
+ if not mydig:
+ return ""
+ mydigs = mydig.split("/")[:-1]
+ mydig = "/".join(mydigs)
+ mysplit = mycpv.split("/")
+ except OSError:
+ return ""
+ return mydig+"/files/digest-"+mysplit[-1]
+
+ def findname(self,mycpv):
+ return self.findname2(mycpv)[0]
+
+ def getRepositoryPath(self, repository_id):
+ """
+ This function is required for GLEP 42 compliance; given a valid repository ID
+ it must return a path to the repository
+ TreeMap = { id:path }
+ """
+ if repository_id in self.treemap:
+ return self.treemap[repository_id]
+ return None
+
+ def getRepositories(self):
+ """
+ This function is required for GLEP 42 compliance; it will return a list of
+ repository ID's
+ TreeMap = {id: path}
+ """
+ return [k for k in self.treemap.keys() if k]
+
+ def findname2(self, mycpv, mytree=None):
+ """
+ Returns the location of the CPV, and what overlay it was in.
+		Searches overlays first, then PORTDIR; this allows us to return the first
+		matching file. If we started in PORTDIR and did the overlays second, we
+		would have to exhaustively search the overlays to be sure we had found
+		the preferred file.
+ """
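+		# e.g. (hypothetical, with PORTDIR=/usr/portage and no overlays):
+		#   findname2("app-editors/vim-7.0")
+		#   -> ["/usr/portage/app-editors/vim/vim-7.0.ebuild", "/usr/portage"]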
+ if not mycpv:
+ return "",0
+ mysplit = mycpv.split("/")
+ psplit = pkgsplit(mysplit[1])
+
+ if mytree:
+ mytrees = [mytree]
+ else:
+ mytrees = self.porttrees[:]
+ mytrees.reverse()
+ if psplit:
+ for x in mytrees:
+				filename = x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
+				if os.access(filename, os.R_OK):
+					return [filename, x]
+ return None, 0
+
+ def aux_get(self, mycpv, mylist, mytree=None):
+		"""stub code for returning auxiliary db information, such as SLOT, DEPEND, etc.
+		input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]
+		return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error"""
+ cache_me = False
+ if not mytree and not set(mylist).difference(self._aux_cache_keys):
+ aux_cache = self._aux_cache.get(mycpv)
+ if aux_cache is not None:
+ return [aux_cache[x] for x in mylist]
+ cache_me = True
+ global auxdbkeys, auxdbkeylen
+ cat,pkg = mycpv.split("/", 1)
+
+ myebuild, mylocation = self.findname2(mycpv, mytree)
+
+ if not myebuild:
+ writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
+ noiselevel=1)
+ writemsg("!!! %s\n" % myebuild, noiselevel=1)
+ raise KeyError(mycpv)
+
+ myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
+ if "gpg" in self.mysettings.features:
+ try:
+ mys = portage.gpg.fileStats(myManifestPath)
+ if (myManifestPath in self.manifestCache) and \
+ (self.manifestCache[myManifestPath] == mys):
+ pass
+ elif self.manifestVerifier:
+ if not self.manifestVerifier.verify(myManifestPath):
+ # Verification failed the desired level.
+ raise UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
+
+ if ("severe" in self.mysettings.features) and \
+ (mys != portage.gpg.fileStats(myManifestPath)):
+ raise SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
+
+ except InvalidSignature, e:
+ if ("strict" in self.mysettings.features) or \
+ ("severe" in self.mysettings.features):
+ raise
+ writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
+ except MissingSignature, e:
+ if ("severe" in self.mysettings.features):
+ raise
+ if ("strict" in self.mysettings.features):
+ if myManifestPath not in self.manifestMissingCache:
+ writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
+ self.manifestMissingCache.insert(0,myManifestPath)
+ except (OSError, FileNotFound), e:
+ if ("strict" in self.mysettings.features) or \
+ ("severe" in self.mysettings.features):
+ raise SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
+				writemsg("!!! Manifest is missing or inaccessible: %(manifest)s\n" % {"manifest":myManifestPath},
+ noiselevel=-1)
+
+
+ if os.access(myebuild, os.R_OK):
+ emtime = os.stat(myebuild)[stat.ST_MTIME]
+ else:
+ writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
+ noiselevel=-1)
+ writemsg("!!! %s\n" % myebuild,
+ noiselevel=-1)
+ raise KeyError
+
+ try:
+ mydata = self.auxdb[mylocation][mycpv]
+ if emtime != long(mydata.get("_mtime_", 0)):
+ doregen = True
+ elif len(mydata.get("_eclasses_", [])) > 0:
+ doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
+ else:
+ doregen = False
+
+ except KeyError:
+ doregen = True
+ except CacheError:
+ doregen = True
+ try:
+ del self.auxdb[mylocation][mycpv]
+ except KeyError:
+ pass
+
+ writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
+
+ if doregen:
+ writemsg("doregen: %s %s\n" % (doregen, mycpv), 2)
+ writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n", 1)
+
+ self.doebuild_settings.reset()
+ mydata = {}
+ myret = doebuild(myebuild, "depend",
+ self.doebuild_settings["ROOT"], self.doebuild_settings,
+ dbkey=mydata, tree="porttree", mydbapi=self)
+ if myret != os.EX_OK:
+ raise KeyError(mycpv)
+
+ if "EAPI" not in mydata or not mydata["EAPI"].strip():
+ mydata["EAPI"] = "0"
+
+ if not eapi_is_supported(mydata["EAPI"]):
+ # if newer version, wipe everything and negate eapi
+ eapi = mydata["EAPI"]
+ mydata = {}
+ map(lambda x: mydata.setdefault(x, ""), auxdbkeys)
+ mydata["EAPI"] = "-"+eapi
+
+ if mydata.get("INHERITED", False):
+ mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
+ else:
+ mydata["_eclasses_"] = {}
+
+ del mydata["INHERITED"]
+
+ mydata["_mtime_"] = emtime
+
+ self.auxdb[mylocation][mycpv] = mydata
+
+ if not mydata.setdefault("EAPI", "0"):
+ mydata["EAPI"] = "0"
+
+ #finally, we look at our internal cache entry and return the requested data.
+ returnme = []
+ for x in mylist:
+ if x == "INHERITED":
+ returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
+ else:
+ returnme.append(mydata.get(x,""))
+
+ if cache_me:
+ aux_cache = {}
+ for x in self._aux_cache_keys:
+ aux_cache[x] = mydata.get(x, "")
+ self._aux_cache[mycpv] = aux_cache
+
+ return returnme
+
+ def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
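+		# Reduces SRC_URI conditionals against the given USE flags (or against
+		# everything when all=1) and returns [uris, files], where files holds
+		# the unique basenames, e.g. "foo-1.0.tar.gz" for a hypothetical
+		# http://example.com/foo-1.0.tar.gz entry.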
+ if mysettings is None:
+ mysettings = self.mysettings
+ try:
+ myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
+ except KeyError:
+ print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
+ sys.exit(1)
+
+ if useflags is None:
+ useflags = mysettings["USE"].split()
+
+ myurilist = paren_reduce(myuris)
+ myurilist = use_reduce(myurilist, uselist=useflags, matchall=all)
+ newuris = flatten(myurilist)
+
+ myfiles = []
+ for x in newuris:
+ mya = os.path.basename(x)
+ if not mya in myfiles:
+ myfiles.append(mya)
+ return [newuris, myfiles]
+
+ def getfetchsizes(self, mypkg, useflags=None, debug=0):
+		# returns a filename:size dictionary of remaining downloads
+ myebuild = self.findname(mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
+ checksums = mf.getDigests()
+ if not checksums:
+ if debug:
+ print "[empty/missing/bad digest]: "+mypkg
+ return None
+ filesdict={}
+ if useflags is None:
+ myuris, myfiles = self.getfetchlist(mypkg,all=1)
+ else:
+ myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
+ #XXX: maybe this should be improved: take partial downloads
+ # into account? check checksums?
+ for myfile in myfiles:
+ if myfile not in checksums:
+ if debug:
+ writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
+ continue
+ file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
+ mystat = None
+ try:
+ mystat = os.stat(file_path)
+ except OSError, e:
+ pass
+ if mystat is None:
+ existing_size = 0
+ else:
+ existing_size = mystat.st_size
+ remaining_size = int(checksums[myfile]["size"]) - existing_size
+ if remaining_size > 0:
+ # Assume the download is resumable.
+ filesdict[myfile] = remaining_size
+ elif remaining_size < 0:
+ # The existing file is too large and therefore corrupt.
+ filesdict[myfile] = int(checksums[myfile]["size"])
+ return filesdict
+
+ def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
+ if not useflags:
+ if mysettings:
+ useflags = mysettings["USE"].split()
+ myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
+ myebuild = self.findname(mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
+ mysums = mf.getDigests()
+
+ failures = {}
+ for x in myfiles:
+ if not mysums or x not in mysums:
+ ok = False
+ reason = "digest missing"
+ else:
+ try:
+ ok, reason = portage.checksum.verify_all(
+ os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
+ except FileNotFound, e:
+ ok = False
+ reason = "File Not Found: '%s'" % str(e)
+ if not ok:
+ failures[x] = reason
+ if failures:
+ return False
+ return True
+
+ def getsize(self, mypkg, useflags=None, debug=0):
+ # returns the total size of remaining downloads
+ #
+		# we use getfetchsizes() now, so this function is obsolete
+ #
+ filesdict = self.getfetchsizes(mypkg, useflags=useflags, debug=debug)
+ if filesdict is None:
+ return "[empty/missing/bad digest]"
+		mysize = 0
+		for myfile in filesdict.keys():
+			mysize += filesdict[myfile]
+		return mysize
+
+ def cpv_exists(self, mykey):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ cps2 = mykey.split("/")
+ cps = catpkgsplit(mykey, silent=0)
+ if not cps:
+ #invalid cat/pkg-v
+ return 0
+ if self.findname(cps[0] + "/" + cps2[1]):
+ return 1
+ else:
+ return 0
+
+ def cp_all(self):
+ "returns a list of all keys in our tree"
+ d = {}
+ for x in self.mysettings.categories:
+ for oroot in self.porttrees:
+ for y in listdir(oroot+"/"+x, EmptyOnError=1, ignorecvs=1, dirsonly=1):
+ d[x+"/"+y] = None
+ l = d.keys()
+ l.sort()
+ return l
+
+ def p_list(self,mycp):
+ d={}
+ for oroot in self.porttrees:
+ for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
+ if x[-7:]==".ebuild":
+ d[x[:-7]] = None
+ return d.keys()
+
+ def cp_list(self, mycp, use_cache=1, mytree=None):
+ mysplit = mycp.split("/")
+ d={}
+ if mytree:
+ mytrees = [mytree]
+ else:
+ mytrees = self.porttrees
+ for oroot in mytrees:
+ for x in listdir(oroot+"/"+mycp, EmptyOnError=1, ignorecvs=1):
+ if x.endswith(".ebuild"):
+ pf = x[:-7]
+ ps = pkgsplit(pf)
+ if not ps:
+ writemsg("\nInvalid ebuild name: %s\n" % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ d[mysplit[0]+"/"+pf] = None
+ return d.keys()
+
+ def freeze(self):
+ for x in ["list-visible", "bestmatch-visible", "match-visible", "match-all"]:
+ self.xcache[x]={}
+ self.frozen=1
+
+ def melt(self):
+ self.xcache = {}
+ self.frozen = 0
+
+ def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
+		"caching match function; very tricky stuff"
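+		# Supported levels, handled by the dispatch below: "list-visible",
+		# "bestmatch-visible", "bestmatch-list", "match-list", "match-visible"
+		# and "match-all"; the *-list levels match against a caller-supplied
+		# sublist and are never cached.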
+ #if no updates are being made to the tree, we can consult our xcache...
+ if self.frozen:
+ try:
+ return self.xcache[level][origdep][:]
+ except KeyError:
+ pass
+
+ if not mydep:
+ #this stuff only runs on first call of xmatch()
+ #create mydep, mykey from origdep
+ mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
+ mykey = dep_getkey(mydep)
+
+ if level == "list-visible":
+ #a list of all visible packages, not called directly (just by xmatch())
+ #myval = self.visible(self.cp_list(mykey))
+
+ myval = self.gvisible(self.visible(self.cp_list(mykey)))
+ elif level == "bestmatch-visible":
+ #dep match -- best match of all visible packages
+ #get all visible matches (from xmatch()), then choose the best one
+
+ myval = best(self.xmatch("match-visible", None, mydep=mydep, mykey=mykey))
+ elif level == "bestmatch-list":
+ #dep match -- find best match but restrict search to sublist
+ #no point in calling xmatch again since we're not caching list deps
+
+ myval = best(match_from_list(mydep, mylist))
+ elif level == "match-list":
+ #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
+
+ myval = match_from_list(mydep, mylist)
+ elif level == "match-visible":
+ #dep match -- find all visible matches
+ #get all visible packages, then get the matching ones
+
+ myval = match_from_list(mydep,
+ self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey))
+ elif level == "match-all":
+ #match *all* visible *and* masked packages
+
+ myval = match_from_list(mydep, self.cp_list(mykey))
+ else:
+ print "ERROR: xmatch doesn't handle", level, "query!"
+ raise KeyError
+ myslot = dep_getslot(mydep)
+ if myslot is not None:
+ slotmatches = []
+ for cpv in myval:
+ try:
+ if self.aux_get(cpv, ["SLOT"])[0] == myslot:
+ slotmatches.append(cpv)
+ except KeyError:
+ pass # ebuild masked by corruption
+ myval = slotmatches
+ if self.frozen and (level not in ["match-list", "bestmatch-list"]):
+ self.xcache[level][mydep] = myval
+ if origdep and origdep != mydep:
+ self.xcache[level][origdep] = myval
+ return myval[:]
+
+ def match(self, mydep, use_cache=1):
+ return self.xmatch("match-visible", mydep)
+
+ def visible(self, mylist):
+ """two functions in one. Accepts a list of cpv values and uses the package.mask *and*
+ packages file to remove invisible entries, returning remaining items. This function assumes
+ that all entries in mylist have the same category and package name."""
+ if (mylist is None) or (len(mylist) == 0):
+ return []
+ newlist = mylist[:]
+ #first, we mask out packages in the package.mask file
+ mykey = newlist[0]
+ cpv = catpkgsplit(mykey)
+ if not cpv:
+ #invalid cat/pkg-v
+ print "visible(): invalid cat/pkg-v:", mykey
+ return []
+ mycp = cpv[0] + "/" + cpv[1]
+ maskdict = self.mysettings.pmaskdict
+ unmaskdict = self.mysettings.punmaskdict
+ if maskdict.has_key(mycp):
+ for x in maskdict[mycp]:
+ mymatches = self.xmatch("match-all", x)
+ if mymatches is None:
+ #error in package.mask file; print warning and continue:
+ print "visible(): package.mask entry \"" + x + "\" is invalid, ignoring..."
+ continue
+ for y in mymatches:
+ unmask = 0
+ if unmaskdict.has_key(mycp):
+ for z in unmaskdict[mycp]:
+ mymatches_unmask = self.xmatch("match-all",z)
+ if y in mymatches_unmask:
+ unmask = 1
+ break
+ if unmask == 0:
+ try:
+ newlist.remove(y)
+ except ValueError:
+ pass
+
+ revmaskdict = self.mysettings.prevmaskdict
+ if revmaskdict.has_key(mycp):
+ for x in revmaskdict[mycp]:
+ #important: only match against the still-unmasked entries...
+ #notice how we pass "newlist" to the xmatch() call below....
+ #Without this, ~ deps in the packages files are broken.
+ mymatches = self.xmatch("match-list", x, mylist=newlist)
+ if mymatches is None:
+ #error in packages file; print warning and continue:
+ print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
+ continue
+ pos = 0
+ while pos < len(newlist):
+ if newlist[pos] not in mymatches:
+ del newlist[pos]
+ else:
+ pos += 1
+ return newlist
+
+ def gvisible(self,mylist):
+ "strip out group-masked (not in current group) entries"
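+		# Keyword semantics as implemented below: "arch" accepts stable,
+		# "~arch" accepts testing, "-arch"/"-*" subtract, and the special
+		# tokens "~*", "*" and "**" (from package.keywords) accept any
+		# testing, any stable, or anything at all, respectively.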
+
+ if mylist is None:
+ return []
+ newlist=[]
+
+ accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
+ pkgdict = self.mysettings.pkeywordsdict
+ for mycpv in mylist:
+ try:
+ keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
+ except KeyError:
+ continue
+ except PortageException, e:
+ writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % \
+ mycpv, noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ del e
+ continue
+ mygroups = keys.split()
+ # Repoman may modify this attribute as necessary.
+ pgroups = accept_keywords[:]
+ match=0
+ cp = dep_getkey(mycpv)
+ if pkgdict.has_key(cp):
+ matches = match_to_list(mycpv, pkgdict[cp].keys())
+ for atom in matches:
+ pgroups.extend(pkgdict[cp][atom])
+ if matches:
+ inc_pgroups = []
+ for x in pgroups:
+ # The -* special case should be removed once the tree
+ # is clean of KEYWORDS=-* crap
+ if x != "-*" and x.startswith("-"):
+ try:
+ inc_pgroups.remove(x[1:])
+ except ValueError:
+ pass
+ if x not in inc_pgroups:
+ inc_pgroups.append(x)
+ pgroups = inc_pgroups
+ del inc_pgroups
+ hasstable = False
+ hastesting = False
+ for gp in mygroups:
+ if gp=="*":
+ writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
+ noiselevel=-1)
+ match=1
+ break
+ elif gp in pgroups:
+ match=1
+ break
+ elif gp[0] == "~":
+ hastesting = True
+ elif gp[0] != "-":
+ hasstable = True
+ if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups) or "**" in pgroups):
+ match=1
+ if match and eapi_is_supported(eapi):
+ newlist.append(mycpv)
+ return newlist
+
+
+def close_portdbapi_caches():
+ for i in portdbapi.portdbapi_instances:
+ i.close_caches()
+
+
+class portagetree(object):
+ def __init__(self, root="/", virtual=None, clone=None, settings=None):
+ """
+ Constructor for a PortageTree
+
+ @param root: ${ROOT}, defaults to '/', see make.conf(5)
+ @type root: String/Path
+ @param virtual: UNUSED
+ @type virtual: No Idea
+ @param clone: Set this if you want a copy of Clone
+ @type clone: Existing portagetree Instance
+ @param settings: Portage Configuration object (portage.settings)
+ @type settings: Instance of portage.config
+ """
+
+ if clone:
+ self.root = clone.root
+ self.portroot = clone.portroot
+ self.pkglines = clone.pkglines
+ else:
+ self.root = root
+ if settings is None:
+ from portage import settings
+ self.settings = settings
+ self.portroot = settings["PORTDIR"]
+ self.virtual = virtual
+ self.dbapi = portdbapi(
+ settings["PORTDIR"], mysettings=settings)
+
+ def dep_bestmatch(self,mydep):
+ "compatibility method"
+ mymatch = self.dbapi.xmatch("bestmatch-visible",mydep)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def dep_match(self,mydep):
+ "compatibility method"
+ mymatch = self.dbapi.xmatch("match-visible",mydep)
+ if mymatch is None:
+ return []
+ return mymatch
+
+ def exists_specific(self,cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallnodes(self):
+		"""new behavior: these are all *unmasked* nodes. There may or may not be
+		masked packages available for the nodes in this list."""
+ return self.dbapi.cp_all()
+
+ def getname(self, pkgname):
+ "returns file location for this particular package (DEPRECATED)"
+ if not pkgname:
+ return ""
+ mysplit = pkgname.split("/")
+ psplit = pkgsplit(mysplit[1])
+ return "/".join([self.portroot, mysplit[0], psplit[0], mysplit[1]])+".ebuild"
+
+ def resolve_specific(self, myspec):
+ cps = catpkgsplit(myspec)
+ if not cps:
+ return None
+ mykey = key_expand(cps[0]+"/"+cps[1], mydb=self.dbapi,
+ settings=self.settings)
+ mykey = mykey + "-" + cps[2]
+ if cps[3] != "r0":
+ mykey = mykey + "-" + cps[3]
+ return mykey
+
+ def depcheck(self, mycheck, use="yes", myusesplit=None):
+ return dep_check(mycheck, self.dbapi, use=use, myuse=myusesplit)
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+ return myslot
+
diff --git a/pym/portage/dbapi/vartree.py b/pym/portage/dbapi/vartree.py
new file mode 100644
index 000000000..259e8326f
--- /dev/null
+++ b/pym/portage/dbapi/vartree.py
@@ -0,0 +1,1837 @@
+from portage.dbapi import dbapi
+from portage.const import VDB_PATH, CACHE_PATH
+from portage.exception import InvalidPackageName, InvalidAtom, \
+ UnsupportedAPIException, FileNotFound
+from portage.dep import dep_getslot, use_reduce, paren_reduce
+from portage.util import apply_secpass_permissions, ConfigProtect, ensure_dirs
+from portage.data import portage_gid, portage_uid, secpass
+from portage.locks import lockdir, unlockdir
+from portage.checksum import perform_md5
+from portage.output import red, green
+
+from portage import pkgsplit, write_atomic, writemsg, isvalidatom, isjustname, \
+ catpkgsplit, writemsg_stdout, catsplit, fixdbentries, listdir, grabfile, \
+	dep_getkey, dep_expand, match_from_list, atomic_ofstream, config, \
+ flatten, key_expand, best, normalize_path, doebuild_environment, doebuild, \
+ elog_process, env_update, dircache, grabdict, writedict, pkgcmp, abssymlink, \
+ new_protect_filename, movefile, bsd_chflags
+
+import os, sys, stat, cPickle, errno, commands, copy, time
+from itertools import izip
+
+class vardbapi(dbapi):
+ def __init__(self, root, categories=None, settings=None, vartree=None):
+ self.root = root[:]
+
+ #cache for category directory mtimes
+ self.mtdircache = {}
+
+ #cache for dependency checks
+ self.matchcache = {}
+
+ #cache for cp_list results
+ self.cpcache = {}
+
+ self.blockers = None
+ if settings is None:
+ from portage import settings
+ self.settings = settings
+ if categories is None:
+ categories = settings.categories
+ self.categories = categories[:]
+ if vartree is None:
+ from portage import db
+ vartree = db[root]["vartree"]
+ self.vartree = vartree
+ self._aux_cache_keys = set(["SLOT", "COUNTER", "PROVIDE", "USE",
+ "IUSE", "DEPEND", "RDEPEND", "PDEPEND"])
+ self._aux_cache = None
+ self._aux_cache_version = "1"
+ self._aux_cache_filename = os.path.join(self.root,
+ CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
+
+ def getpath(self, mykey, filename=None):
+		rValue = os.path.join(self.root, VDB_PATH, mykey)
+ if filename != None:
+ rValue = os.path.join(rValue, filename)
+ return rValue
+
+ def cpv_exists(self, mykey):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ return os.path.exists(self.getpath(mykey))
+
+ def cpv_counter(self, mycpv):
+ "This method will grab the COUNTER. Returns a counter value."
+ try:
+ return long(self.aux_get(mycpv, ["COUNTER"])[0])
+		except (KeyError, ValueError):
+ pass
+ cdir = self.getpath(mycpv)
+ cpath = self.getpath(mycpv, filename="COUNTER")
+
+ # We write our new counter value to a new file that gets moved into
+ # place to avoid filesystem corruption on XFS (unexpected reboot.)
+ corrupted = 0
+ if os.path.exists(cpath):
+ cfile = open(cpath, "r")
+ try:
+ counter = long(cfile.readline())
+ except ValueError:
+ print "portage: COUNTER for", mycpv, "was corrupted; resetting to value of 0"
+ counter = long(0)
+ corrupted = 1
+ cfile.close()
+ elif os.path.exists(cdir):
+ mys = pkgsplit(mycpv)
+ myl = self.match(mys[0], use_cache=0)
+ if len(myl) == 1:
+ try:
+ # Only one package... Counter doesn't matter.
+ write_atomic(cpath, "1")
+ counter = 1
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
+ noiselevel=-1)
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
+ noiselevel=-1)
+ writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
+ writemsg("!!! %s\n" % e, noiselevel=-1)
+ sys.exit(1)
+ else:
+ writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
+ noiselevel=-1)
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
+ noiselevel=-1)
+ writemsg("!!! remerge the package.\n", noiselevel=-1)
+ sys.exit(1)
+ else:
+ counter = long(0)
+ if corrupted:
+ # update new global counter file
+ write_atomic(cpath, str(counter))
+ return counter
+
+ def cpv_inject(self, mycpv):
+ "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
+ os.makedirs(self.getpath(mycpv))
+ counter = self.counter_tick(self.root, mycpv=mycpv)
+ # write local package counter so that emerge clean does the right thing
+ write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
+
+ def isInjected(self, mycpv):
+ if self.cpv_exists(mycpv):
+ if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
+ return True
+ if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
+ return True
+ return False
+
+ def move_ent(self, mylist):
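+		# mylist is a parsed update command, e.g. ["move", "net-misc/foo",
+		# "net-misc/bar"]: each installed version of foo is renamed on disk
+		# and its PF/CATEGORY entries are rewritten to match.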
+ origcp = mylist[1]
+ newcp = mylist[2]
+
+ # sanity check
+ for cp in [origcp, newcp]:
+ if not (isvalidatom(cp) and isjustname(cp)):
+ raise InvalidPackageName(cp)
+ origmatches = self.match(origcp, use_cache=0)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ mycpsplit = catpkgsplit(mycpv)
+ mynewcpv = newcp + "-" + mycpsplit[2]
+ mynewcat = newcp.split("/")[0]
+ if mycpsplit[3] != "r0":
+ mynewcpv += "-" + mycpsplit[3]
+ mycpsplit_new = catpkgsplit(mynewcpv)
+ origpath = self.getpath(mycpv)
+ if not os.path.exists(origpath):
+ continue
+ writemsg_stdout("@")
+ if not os.path.exists(self.getpath(mynewcat)):
+ #create the directory
+ os.makedirs(self.getpath(mynewcat))
+ newpath = self.getpath(mynewcpv)
+ if os.path.exists(newpath):
+ #dest already exists; keep this puppy where it is.
+ continue
+ os.rename(origpath, newpath)
+
+ # We need to rename the ebuild now.
+ old_pf = catsplit(mycpv)[1]
+ new_pf = catsplit(mynewcpv)[1]
+ if new_pf != old_pf:
+ try:
+ os.rename(os.path.join(newpath, old_pf + ".ebuild"),
+ os.path.join(newpath, new_pf + ".ebuild"))
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
+
+ write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
+ fixdbentries([mylist], newpath)
+
+ def update_ents(self, update_iter):
+ """Run fixdbentries on all installed packages (time consuming). Like
+ fixpackages, this should be run from a helper script and display
+ a progress indicator."""
+ dbdir = os.path.join(self.root, VDB_PATH)
+ for catdir in listdir(dbdir):
+ catdir = dbdir+"/"+catdir
+ if os.path.isdir(catdir):
+ for pkgdir in listdir(catdir):
+ pkgdir = catdir+"/"+pkgdir
+ if os.path.isdir(pkgdir):
+ fixdbentries(update_iter, pkgdir)
+
+ def move_slot_ent(self, mylist):
+ pkg = mylist[1]
+ origslot = mylist[2]
+ newslot = mylist[3]
+
+ if not isvalidatom(pkg):
+ raise InvalidAtom(pkg)
+
+ origmatches = self.match(pkg, use_cache=0)
+
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ origpath = self.getpath(mycpv)
+ if not os.path.exists(origpath):
+ continue
+
+ slot = grabfile(os.path.join(origpath, "SLOT"));
+ if (not slot):
+ continue
+
+ if (slot[0] != origslot):
+ continue
+
+ writemsg_stdout("s")
+ write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
+
+ def cp_list(self, mycp, use_cache=1):
+		mysplit = catsplit(mycp)
+		if mysplit[0].startswith('*'):
+			mysplit[0] = mysplit[0][1:]
+ try:
+ mystat = os.stat(self.getpath(mysplit[0]))[stat.ST_MTIME]
+ except OSError:
+ mystat = 0
+ if use_cache and self.cpcache.has_key(mycp):
+ cpc = self.cpcache[mycp]
+ if cpc[0] == mystat:
+ return cpc[1]
+ list = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
+
+ if (list is None):
+ return []
+ returnme = []
+ for x in list:
+ if x.startswith("."):
+ continue
+ if x[0] == '-':
+ #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
+ continue
+ ps = pkgsplit(x)
+ if not ps:
+				self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
+ continue
+ if len(mysplit) > 1:
+ if ps[0] == mysplit[1]:
+ returnme.append(mysplit[0]+"/"+x)
+ if use_cache:
+ self.cpcache[mycp] = [mystat,returnme]
+ elif self.cpcache.has_key(mycp):
+ del self.cpcache[mycp]
+ return returnme
+
+ def cpv_all(self, use_cache=1):
+ returnme = []
+ basepath = self.getpath("/")
+
+ for x in self.categories:
+ for y in listdir(basepath + x, EmptyOnError=1):
+ if y.startswith("."):
+ continue
+ subpath = x + "/" + y
+ # -MERGING- should never be a cpv, nor should files.
+ if os.path.isdir(basepath + subpath) and (pkgsplit(y) is not None):
+ returnme += [subpath]
+ return returnme
+
+ def cp_all(self, use_cache=1):
+ mylist = self.cpv_all(use_cache=use_cache)
+ d={}
+ for y in mylist:
+ if y[0] == '*':
+ y = y[1:]
+ mysplit = catpkgsplit(y)
+ if not mysplit:
+ self.invalidentry(self.getpath(y))
+ continue
+ d[mysplit[0]+"/"+mysplit[1]] = None
+ return d.keys()
+
+ def checkblockers(self, origdep):
+ pass
+
+ def match(self, origdep, use_cache=1):
+ "caching match function"
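+		# Match results are cached per category; a cached entry is discarded
+		# whenever the mtime of the <ROOT>/var/db/pkg/<category> directory
+		# changes (see mtdircache below).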
+ mydep = dep_expand(
+ origdep, mydb=self, use_cache=use_cache, settings=self.settings)
+ mykey = dep_getkey(mydep)
+ mycat = catsplit(mykey)[0]
+ if not use_cache:
+ if self.matchcache.has_key(mycat):
+ del self.mtdircache[mycat]
+ del self.matchcache[mycat]
+ mymatch = match_from_list(mydep,
+ self.cp_list(mykey, use_cache=use_cache))
+ myslot = dep_getslot(mydep)
+ if myslot is not None:
+ mymatch = [cpv for cpv in mymatch \
+ if self.aux_get(cpv, ["SLOT"])[0] == myslot]
+ return mymatch
+ try:
+ curmtime = os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
+ except (IOError, OSError):
+ curmtime=0
+
+ if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
+ # clear cache entry
+ self.mtdircache[mycat] = curmtime
+ self.matchcache[mycat] = {}
+ if not self.matchcache[mycat].has_key(mydep):
+ mymatch = match_from_list(mydep, self.cp_list(mykey, use_cache=use_cache))
+ myslot = dep_getslot(mydep)
+ if myslot is not None:
+ mymatch = [cpv for cpv in mymatch \
+ if self.aux_get(cpv, ["SLOT"])[0] == myslot]
+ self.matchcache[mycat][mydep] = mymatch
+ return self.matchcache[mycat][mydep][:]
+
+ def findname(self, mycpv):
+		return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
+
+ def flush_cache(self):
+ """If the current user has permission and the internal aux_get cache has
+ been updated, save it to disk and mark it unmodified. This is called
+ by emerge after it has loaded the full vdb for use in dependency
+ calculations. Currently, the cache is only written if the user has
+ superuser privileges (since that's required to obtain a lock), but all
+ users have read access and benefit from faster metadata lookups (as
+ long as at least part of the cache is still valid)."""
+ if self._aux_cache is not None and \
+ self._aux_cache["modified"] and \
+ secpass >= 2:
+ valid_nodes = set(self.cpv_all())
+ for cpv in self._aux_cache["packages"].keys():
+ if cpv not in valid_nodes:
+ del self._aux_cache["packages"][cpv]
+ del self._aux_cache["modified"]
+ try:
+ f = atomic_ofstream(self._aux_cache_filename)
+ cPickle.dump(self._aux_cache, f, -1)
+ f.close()
+ apply_secpass_permissions(
+ self._aux_cache_filename, gid=portage_gid, mode=0644)
+ except (IOError, OSError), e:
+ pass
+ self._aux_cache["modified"] = False
+
+ def aux_get(self, mycpv, wants):
+ """This automatically caches selected keys that are frequently needed
+ by emerge for dependency calculations. The cached metadata is
+ considered valid if the mtime of the package directory has not changed
+ since the data was cached. The cache is stored in a pickled dict
+ object with the following format:
+
+		{"version": "1", "packages": {cpv1: (mtime, {k1: v1, k2: v2, ...}), cpv2: ...}}
+
+		If an error occurs while loading the cache pickle or the version is
+		unrecognized, the cache will simply be recreated from scratch (it is
+		completely disposable).
+ """
+ if not self._aux_cache_keys.intersection(wants):
+ return self._aux_get(mycpv, wants)
+ if self._aux_cache is None:
+ try:
+ f = open(self._aux_cache_filename)
+ mypickle = cPickle.Unpickler(f)
+ mypickle.find_global = None
+ self._aux_cache = mypickle.load()
+ f.close()
+ del f
+ except (IOError, OSError, EOFError, cPickle.UnpicklingError):
+ pass
+ if not self._aux_cache or \
+ not isinstance(self._aux_cache, dict) or \
+ self._aux_cache.get("version") != self._aux_cache_version or \
+ not self._aux_cache.get("packages"):
+ self._aux_cache = {"version": self._aux_cache_version}
+ self._aux_cache["packages"] = {}
+ self._aux_cache["modified"] = False
+ mydir = self.getpath(mycpv)
+ mydir_stat = None
+ try:
+ mydir_stat = os.stat(mydir)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise KeyError(mycpv)
+ mydir_mtime = long(mydir_stat.st_mtime)
+ pkg_data = self._aux_cache["packages"].get(mycpv)
+ mydata = {}
+ cache_valid = False
+ if pkg_data:
+ cache_mtime, metadata = pkg_data
+ cache_valid = cache_mtime == mydir_mtime
+ if cache_valid and set(metadata) != self._aux_cache_keys:
+ # Allow self._aux_cache_keys to change without a cache version
+ # bump.
+ cache_valid = False
+ if cache_valid:
+ mydata.update(metadata)
+ pull_me = set(wants).difference(self._aux_cache_keys)
+ else:
+ pull_me = self._aux_cache_keys.union(wants)
+ if pull_me:
+ # pull any needed data and cache it
+ aux_keys = list(pull_me)
+ for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
+ mydata[k] = v
+ if not cache_valid:
+ cache_data = {}
+ for aux_key in self._aux_cache_keys:
+ cache_data[aux_key] = mydata[aux_key]
+ self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
+ self._aux_cache["modified"] = True
+ return [mydata[x] for x in wants]
+
+ def _aux_get(self, mycpv, wants):
+ mydir = self.getpath(mycpv)
+ try:
+ if not stat.S_ISDIR(os.stat(mydir).st_mode):
+ raise KeyError(mycpv)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ raise KeyError(mycpv)
+ del e
+ raise
+ results = []
+ for x in wants:
+ try:
+ myf = open(os.path.join(mydir, x), "r")
+ try:
+ myd = myf.read()
+ finally:
+ myf.close()
+ myd = " ".join(myd.split())
+ except IOError:
+ myd = ""
+ if x == "EAPI" and not myd:
+ results.append("0")
+ else:
+ results.append(myd)
+ return results
+
+ def aux_update(self, cpv, values):
+ cat, pkg = catsplit(cpv)
+ mylink = dblink(cat, pkg, self.root, self.settings,
+ treetype="vartree", vartree=self.vartree)
+ if not mylink.exists():
+ raise KeyError(cpv)
+ for k, v in values.iteritems():
+ mylink.setfile(k, v)
+
+ def counter_tick(self, myroot, mycpv=None):
+ return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)
+
+ def get_counter_tick_core(self, myroot, mycpv=None):
+ return self.counter_tick_core(myroot, incrementing=0, mycpv=mycpv) + 1
+
+ def counter_tick_core(self, myroot, incrementing=1, mycpv=None):
+ "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
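+		# Recovery order when the global counter file is missing or corrupt:
+		# regenerate it from the highest per-package COUNTER found under
+		# <ROOT>/var/db/pkg (the shell pipeline below), and bump it well past
+		# the largest COUNTER among mycpv's installed siblings
+		# (min_counter + 1000) if it has fallen behind.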
+ cpath = os.path.join(myroot, CACHE_PATH.lstrip(os.sep), "counter")
+ changed = 0
+ min_counter = 0
+ if mycpv:
+ mysplit = pkgsplit(mycpv)
+ for x in self.match(mysplit[0], use_cache=0):
+ if x == mycpv:
+ continue
+ try:
+ old_counter = long(self.aux_get(x, ["COUNTER"])[0])
+ writemsg("COUNTER '%d' '%s'\n" % (old_counter, x), 1)
+ except (ValueError, KeyError): # valueError from long(), KeyError from aux_get
+ old_counter = 0
+ writemsg("!!! BAD COUNTER in '%s'\n" % (x), noiselevel=-1)
+ if old_counter > min_counter:
+ min_counter = old_counter
+
+ # We write our new counter value to a new file that gets moved into
+ # place to avoid filesystem corruption.
+ find_counter = ("find '%s' -type f -name COUNTER | " + \
+ "while read f; do echo $(<\"${f}\"); done | " + \
+ "sort -n | tail -n1") % os.path.join(self.root, VDB_PATH)
+ if os.path.exists(cpath):
+ cfile = open(cpath, "r")
+ try:
+ counter = long(cfile.readline())
+ except (ValueError,OverflowError):
+ try:
+ counter = long(commands.getoutput(find_counter).strip())
+ writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter,
+ noiselevel=-1)
+ changed=1
+ except (ValueError, OverflowError):
+ writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n",
+ noiselevel=-1)
+ writemsg("!!! corrected/normalized so that portage can operate properly.\n",
+ noiselevel=-1)
+					writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n",
+						noiselevel=-1)
+ sys.exit(2)
+ cfile.close()
+ else:
+ try:
+ counter = long(commands.getoutput(find_counter).strip())
+ writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter,
+ noiselevel=-1)
+ except ValueError: # Value Error for long(), probably others for commands.getoutput
+ writemsg("!!! Initializing global counter.\n", noiselevel=-1)
+ counter = long(0)
+ changed = 1
+
+ if counter < min_counter:
+ counter = min_counter + 1000
+ changed = 1
+
+ if incrementing or changed:
+
+ #increment counter
+ counter += 1
+ # update new global counter file
+ write_atomic(cpath, str(counter))
+ return counter
+
+class vartree(object):
+ "this tree will scan a var/db/pkg database located at root (passed to init)"
+ def __init__(self, root="/", virtual=None, clone=None, categories=None,
+ settings=None):
+ if clone:
+ self.root = clone.root[:]
+ self.dbapi = copy.deepcopy(clone.dbapi)
+ self.populated = 1
+ self.settings = config(clone=clone.settings)
+ else:
+ self.root = root[:]
+ if settings is None:
+ from portage import settings
+ self.settings = settings # for key_expand calls
+ if categories is None:
+ categories = settings.categories
+ self.dbapi = vardbapi(self.root, categories=categories,
+ settings=settings, vartree=self)
+ self.populated = 1
+
+ def getpath(self, mykey, filename=None):
+		rValue = os.path.join(self.root, VDB_PATH, mykey)
+ if filename != None:
+ rValue = os.path.join(rValue, filename)
+ return rValue
+
+ def zap(self, mycpv):
+ return
+
+ def inject(self, mycpv):
+ return
+
+ def get_provide(self, mycpv):
+ myprovides = []
+ mylines = None
+ try:
+ mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
+ if mylines:
+ myuse = myuse.split()
+ mylines = flatten(use_reduce(paren_reduce(mylines), uselist=myuse))
+ for myprovide in mylines:
+ mys = catpkgsplit(myprovide)
+ if not mys:
+ mys = myprovide.split("/")
+ myprovides += [mys[0] + "/" + mys[1]]
+ return myprovides
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ mydir = self.getpath(mycpv)
+ writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
+ noiselevel=-1)
+ if mylines:
+ writemsg("Possibly Invalid: '%s'\n" % str(mylines),
+ noiselevel=-1)
+ writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
+ return []
+
+ def get_all_provides(self):
+ myprovides = {}
+ for node in self.getallcpv():
+ for mykey in self.get_provide(node):
+ if myprovides.has_key(mykey):
+ myprovides[mykey] += [node]
+ else:
+ myprovides[mykey] = [node]
+ return myprovides
+
+ def dep_bestmatch(self, mydep, use_cache=1):
+ "compatibility method -- all matches, not just visible ones"
+ #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
+ mymatch = best(self.dbapi.match(
+ dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
+ use_cache=use_cache))
+ if mymatch is None:
+ return ""
+ else:
+ return mymatch
+
+ def dep_match(self, mydep, use_cache=1):
+ "compatibility method -- we want to see all matches, not just visible ones"
+ #mymatch = match(mydep,self.dbapi)
+ mymatch = self.dbapi.match(mydep, use_cache=use_cache)
+ if mymatch is None:
+ return []
+ else:
+ return mymatch
+
+ def exists_specific(self, cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallcpv(self):
+ """temporary function, probably to be renamed --- Gets a list of all
+ category/package-versions installed on the system."""
+ return self.dbapi.cpv_all()
+
+ def getallnodes(self):
+		"""new behavior: these are all *unmasked* nodes. There may or may not be
+		masked packages available for the nodes in this list."""
+ return self.dbapi.cp_all()
+
+ def exists_specific_cat(self, cpv, use_cache=1):
+ cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
+ settings=self.settings)
+ a = catpkgsplit(cpv)
+ if not a:
+ return 0
+ mylist = listdir(self.getpath(a[0]), EmptyOnError=1)
+ for x in mylist:
+ b = pkgsplit(x)
+ if not b:
+ self.dbapi.invalidentry(self.getpath(a[0], filename=x))
+ continue
+ if a[1] == b[0]:
+ return 1
+ return 0
+
+ def getebuildpath(self, fullpackage):
+ cat, package = catsplit(fullpackage)
+ return self.getpath(fullpackage, filename=package+".ebuild")
+
+ def getnode(self, mykey, use_cache=1):
+ mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
+ settings=self.settings)
+ if not mykey:
+ return []
+ mysplit = catsplit(mykey)
+ mydirlist = listdir(self.getpath(mysplit[0]),EmptyOnError=1)
+ returnme = []
+ for x in mydirlist:
+ mypsplit = pkgsplit(x)
+ if not mypsplit:
+ self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
+ continue
+ if mypsplit[0] == mysplit[1]:
+ appendme = [mysplit[0]+"/"+x, [mysplit[0], mypsplit[0], mypsplit[1], mypsplit[2]]]
+ returnme.append(appendme)
+ return returnme
+
+
+ def getslot(self, mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ try:
+ return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+ except KeyError:
+ return ""
+
+ def hasnode(self, mykey, use_cache):
+ """Does the particular node (cat/pkg key) exist?"""
+ mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
+ settings=self.settings)
+ mysplit = catsplit(mykey)
+ mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
+ for x in mydirlist:
+ mypsplit = pkgsplit(x)
+ if not mypsplit:
+ self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
+ continue
+ if mypsplit[0] == mysplit[1]:
+ return 1
+ return 0
+
+ def populate(self):
+ self.populated=1
+
+class dblink(object):
+ """
+ This class provides an interface to the installed package database
+ At present this is implemented as a text backend in /var/db/pkg.
+ """
+ def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
+ vartree=None):
+ """
+ Creates a DBlink object for a given CPV.
+ The given CPV may not be present in the database already.
+
+ @param cat: Category
+ @type cat: String
+ @param pkg: Package (PV)
+ @type pkg: String
+ @param myroot: Typically ${ROOT}
+ @type myroot: String (Path)
+ @param mysettings: Typically portage.config
+ @type mysettings: An instance of portage.config
+ @param treetype: one of ['porttree','bintree','vartree']
+ @type treetype: String
+ @param vartree: an instance of vartree corresponding to myroot.
+ @type vartree: vartree
+ """
+
+ self.cat = cat
+ self.pkg = pkg
+ self.mycpv = self.cat + "/" + self.pkg
+ self.mysplit = pkgsplit(self.mycpv)
+ self.treetype = treetype
+ if vartree is None:
+ from portage import db
+ vartree = db[myroot]["vartree"]
+ self.vartree = vartree
+
+ self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
+ self.dbcatdir = self.dbroot+"/"+cat
+ self.dbpkgdir = self.dbcatdir+"/"+pkg
+ self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
+ self.dbdir = self.dbpkgdir
+
+ self._lock_vdb = None
+
+ self.settings = mysettings
+ if self.settings == 1:
+ raise ValueError
+
+ self.myroot=myroot
+ protect_obj = ConfigProtect(myroot,
+ mysettings.get("CONFIG_PROTECT","").split(),
+ mysettings.get("CONFIG_PROTECT_MASK","").split())
+ self.updateprotect = protect_obj.updateprotect
+ self._config_protect = protect_obj
+ self._installed_instance = None
+ self.contentscache=[]
+ self._contents_inodes = None
+
+ def lockdb(self):
+ if self._lock_vdb:
+ raise AssertionError("Lock already held.")
+ # At least the parent needs to exist for the lock file.
+ ensure_dirs(self.dbroot)
+ self._lock_vdb = lockdir(self.dbroot)
+
+ def unlockdb(self):
+ if self._lock_vdb:
+ unlockdir(self._lock_vdb)
+ self._lock_vdb = None
+
+ def getpath(self):
+ "return path to location of db information (for >>> informational display)"
+ return self.dbdir
+
+ def exists(self):
+ "does the db entry exist? boolean."
+ return os.path.exists(self.dbdir)
+
+ def delete(self):
+ """
+ Remove this entry from the database
+ """
+ if not os.path.exists(self.dbdir):
+ return
+ try:
+ for x in listdir(self.dbdir):
+ os.unlink(self.dbdir+"/"+x)
+ os.rmdir(self.dbdir)
+ except OSError, e:
+ print "!!! Unable to remove db entry for this package."
+ print "!!! It is possible that a directory is in this one. Portage will still"
+ print "!!! register this package as installed as long as this directory exists."
+ print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
+ print "!!! "+str(e)
+ print
+ sys.exit(1)
+
+ def clearcontents(self):
+ """
+ For a given db entry (self), erase the CONTENTS values.
+ """
+ if os.path.exists(self.dbdir+"/CONTENTS"):
+ os.unlink(self.dbdir+"/CONTENTS")
+
+ def getcontents(self):
+ """
+ Get the installed files of a given package (aka what that package installed)
+ """
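+		# CONTENTS lines look like (illustrative values):
+		#   dir /usr/share/foo
+		#   obj /usr/bin/foo 2d8a0f3b... 1171000000
+		#   sym /usr/lib/libfoo.so -> libfoo.so.1 1171000000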
+ contents_file = os.path.join(self.dbdir, "CONTENTS")
+ if not os.path.exists(contents_file):
+ return None
+ if self.contentscache != []:
+ return self.contentscache
+ pkgfiles = {}
+ myc = open(contents_file,"r")
+ mylines = myc.readlines()
+ myc.close()
+ null_byte = "\0"
+ pos = 0
+ for line in mylines:
+ pos += 1
+ if null_byte in line:
+ # Null bytes are a common indication of corruption.
+ writemsg("!!! Null byte found in contents " + \
+ "file, line %d: '%s'\n" % (pos, contents_file),
+ noiselevel=-1)
+ continue
+ mydat = line.split()
+ # we do this so we can remove from non-root filesystems
+ # (use the ROOT var to allow maintenance on other partitions)
+ try:
+ mydat[1] = normalize_path(os.path.join(
+ self.myroot, mydat[1].lstrip(os.path.sep)))
+ if mydat[0] == "obj":
+ #format: type, mtime, md5sum
+ pkgfiles[" ".join(mydat[1:-2])] = [mydat[0], mydat[-1], mydat[-2]]
+ elif mydat[0] == "dir":
+ #format: type
+ pkgfiles[" ".join(mydat[1:])] = [mydat[0] ]
+ elif mydat[0] == "sym":
+ #format: type, mtime, dest
+ x = len(mydat) - 1
+ if (x >= 13) and (mydat[-1][-1] == ')'): # Old/Broken symlink entry
+ mydat = mydat[:-10] + [mydat[-10:][stat.ST_MTIME][:-1]]
+ writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
+ x = len(mydat) - 1
+ splitter = -1
+ while (x >= 0):
+ if mydat[x] == "->":
+ splitter = x
+ break
+ x = x - 1
+ if splitter == -1:
+ return None
+ pkgfiles[" ".join(mydat[1:splitter])] = [mydat[0], mydat[-1], " ".join(mydat[(splitter+1):-1])]
+ elif mydat[0] == "dev":
+ #format: type
+ pkgfiles[" ".join(mydat[1:])] = [mydat[0] ]
+ elif mydat[0]=="fif":
+ #format: type
+ pkgfiles[" ".join(mydat[1:])] = [mydat[0]]
+ else:
+ return None
+ except (KeyError, IndexError):
+ print "portage: CONTENTS line", pos, "corrupt!"
+ self.contentscache = pkgfiles
+ return pkgfiles
+
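+	# getcontents() parses one entry per line; a sketch of the mapping built
+	# for a hypothetical package (paths, hash and mtimes invented), with
+	# myroot == "/":
+	#
+	#     obj /usr/bin/nano d41d8cd98f00b204e9800998ecf8427e 1171057792
+	#     sym /usr/bin/pico -> nano 1171057792
+	#     dir /usr/share/nano
+	#
+	# becomes:
+	#
+	#     {"/usr/bin/nano": ["obj", "1171057792", "d41d8cd98f00b204e9800998ecf8427e"],
+	#      "/usr/bin/pico": ["sym", "1171057792", "nano"],
+	#      "/usr/share/nano": ["dir"]}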
+ def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
+ ldpath_mtimes=None):
+ """
+ Calls prerm
+ Unmerges a given package (CPV)
+ calls postrm
+ calls cleanrm
+ calls env_update
+
+ @param pkgfiles: files to unmerge (generally self.getcontents() )
+ @type pkgfiles: Dictionary
+ @param trimworld: Remove CPV from world file if True, not if False
+ @type trimworld: Boolean
+ @param cleanup: cleanup to pass to doebuild (see doebuild)
+ @type cleanup: Boolean
+ @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
+ @type ldpath_mtimes: Dictionary
+ @rtype: Integer
+ @returns:
+ 1. os.EX_OK if everything went well.
+ 2. return code of the failed phase (for prerm, postrm, cleanrm)
+
+ Notes:
+ The caller must ensure that lockdb() and unlockdb() are called
+ before and after this method.
+ """
+
+ contents = self.getcontents()
+ # Now, don't assume that the name of the ebuild is the same as the
+ # name of the dir; the package may have been moved.
+ myebuildpath = None
+ mystuff = listdir(self.dbdir, EmptyOnError=1)
+ for x in mystuff:
+ if x.endswith(".ebuild"):
+ myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
+ if x[:-7] != self.pkg:
+ # Clean up after vardbapi.move_ent() breakage in
+ # portage versions before 2.1.2
+ os.rename(os.path.join(self.dbdir, x), myebuildpath)
+ write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
+ break
+
+ self.settings.load_infodir(self.dbdir)
+ if myebuildpath:
+ try:
+ doebuild_environment(myebuildpath, "prerm", self.myroot,
+ self.settings, 0, 0, self.vartree.dbapi)
+ except UnsupportedAPIException, e:
+ # Sometimes this happens due to corruption of the EAPI file.
+ writemsg("!!! FAILED prerm: %s\n" % \
+ os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
+ writemsg("%s\n" % str(e), noiselevel=-1)
+ return 1
+ catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
+ ensure_dirs(os.path.dirname(catdir),
+ uid=portage_uid, gid=portage_gid, mode=070, mask=0)
+ builddir_lock = None
+ catdir_lock = None
+ try:
+ if myebuildpath:
+ catdir_lock = lockdir(catdir)
+ ensure_dirs(catdir,
+ uid=portage_uid, gid=portage_gid,
+ mode=070, mask=0)
+ builddir_lock = lockdir(
+ self.settings["PORTAGE_BUILDDIR"])
+ try:
+ unlockdir(catdir_lock)
+ finally:
+ catdir_lock = None
+ # Eventually, we'd like to pass in the saved ebuild env here...
+ retval = doebuild(myebuildpath, "prerm", self.myroot,
+ self.settings, cleanup=cleanup, use_cache=0,
+ mydbapi=self.vartree.dbapi, tree="vartree",
+ vartree=self.vartree)
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
+ return retval
+
+ self._unmerge_pkgfiles(pkgfiles)
+
+ if myebuildpath:
+ retval = doebuild(myebuildpath, "postrm", self.myroot,
+ self.settings, use_cache=0, tree="vartree",
+ mydbapi=self.vartree.dbapi, vartree=self.vartree)
+
+ # process logs created during pre/postrm
+ elog_process(self.mycpv, self.settings)
+
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
+ return retval
+ doebuild(myebuildpath, "cleanrm", self.myroot, self.settings,
+ tree="vartree", mydbapi=self.vartree.dbapi,
+ vartree=self.vartree)
+
+ finally:
+ if builddir_lock:
+ unlockdir(builddir_lock)
+ try:
+ if myebuildpath and not catdir_lock:
+ # Lock catdir for removal if empty.
+ catdir_lock = lockdir(catdir)
+ finally:
+ if catdir_lock:
+ try:
+ os.rmdir(catdir)
+ except OSError, e:
+ if e.errno not in (errno.ENOENT,
+ errno.ENOTEMPTY, errno.EEXIST):
+ raise
+ del e
+ unlockdir(catdir_lock)
+ env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
+ contents=contents)
+ return os.EX_OK
+
+ def _unmerge_pkgfiles(self, pkgfiles):
+ """
+
+ Unmerges the contents of a package from the liveFS
+ Removes the VDB entry for self
+
+ @param pkgfiles: typically self.getcontents()
+		@type pkgfiles: Dictionary { filename: [ 'type', 'mtime', 'md5sum' ] } (fields vary by type)
+ @rtype: None
+ """
+ global dircache
+ dircache={}
+
+ if not pkgfiles:
+ writemsg_stdout("No package files given... Grabbing a set.\n")
+ pkgfiles = self.getcontents()
+
+ if pkgfiles:
+ mykeys = pkgfiles.keys()
+ mykeys.sort()
+ mykeys.reverse()
+
+ #process symlinks second-to-last, directories last.
+ mydirs = []
+ modprotect = "/lib/modules/"
+ for objkey in mykeys:
+ obj = normalize_path(objkey)
+ if obj[:2] == "//":
+ obj = obj[1:]
+ statobj = None
+ try:
+ statobj = os.stat(obj)
+ except OSError:
+ pass
+ lstatobj = None
+ try:
+ lstatobj = os.lstat(obj)
+ except (OSError, AttributeError):
+ pass
+ islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
+ if statobj is None:
+ if not islink:
+ #we skip this if we're dealing with a symlink
+ #because os.stat() will operate on the
+ #link target rather than the link itself.
+ writemsg_stdout("--- !found " + str(pkgfiles[objkey][0]) + " %s\n" % obj)
+ continue
+ # next line includes a tweak to protect modules from being unmerged,
+ # but we don't protect modules from being overwritten if they are
+ # upgraded. We effectively only want one half of the config protection
+				# functionality for /lib/modules. For portage-ng, both capabilities
+				# should be independently configurable.
+ if obj.startswith(modprotect):
+ writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
+ continue
+
+ lmtime = str(lstatobj[stat.ST_MTIME])
+ if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
+ writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
+ continue
+
+ if pkgfiles[objkey][0] == "dir":
+ if statobj is None or not stat.S_ISDIR(statobj.st_mode):
+ writemsg_stdout("--- !dir %s %s\n" % ("dir", obj))
+ continue
+ mydirs.append(obj)
+ elif pkgfiles[objkey][0] == "sym":
+ if not islink:
+ writemsg_stdout("--- !sym %s %s\n" % ("sym", obj))
+ continue
+ try:
+ os.unlink(obj)
+ writemsg_stdout("<<< %s %s\n" % ("sym", obj))
+ except (OSError, IOError),e:
+ writemsg_stdout("!!! %s %s\n" % ("sym", obj))
+ elif pkgfiles[objkey][0] == "obj":
+ if statobj is None or not stat.S_ISREG(statobj.st_mode):
+ writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
+ continue
+ mymd5 = None
+ try:
+ mymd5 = perform_md5(obj, calc_prelink=1)
+ except FileNotFound, e:
+ # the file has disappeared between now and our stat call
+ writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
+ continue
+
+					# The lower() call is needed because db entries used to be stored in
+					# upper case; lowering here preserves backwards compatibility.
+ if mymd5 != pkgfiles[objkey][2].lower():
+ writemsg_stdout("--- !md5 %s %s\n" % ("obj", obj))
+ continue
+ try:
+ os.unlink(obj)
+ except (OSError, IOError), e:
+ pass
+ writemsg_stdout("<<< %s %s\n" % ("obj", obj))
+ elif pkgfiles[objkey][0] == "fif":
+ if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
+ writemsg_stdout("--- !fif %s %s\n" % ("fif", obj))
+ continue
+ writemsg_stdout("--- %s %s\n" % ("fif", obj))
+ elif pkgfiles[objkey][0] == "dev":
+ writemsg_stdout("--- %s %s\n" % ("dev", obj))
+
+ mydirs.sort()
+ mydirs.reverse()
+
+ for obj in mydirs:
+ try:
+ os.rmdir(obj)
+ writemsg_stdout("<<< %s %s\n" % ("dir", obj))
+ except (OSError, IOError):
+ writemsg_stdout("--- !empty dir %s\n" % obj)
+
+ #remove self from vartree database so that our own virtual gets zapped if we're the last node
+ self.vartree.zap(self.mycpv)
+
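+	# Because mykeys is reverse-sorted above, deeper paths are processed
+	# before their parents; an illustrative ordering:
+	#
+	#     /usr/share/nano/fr.nanorc   (obj: unlinked first)
+	#     /usr/share/nano             (dir: queued in mydirs)
+	#     /usr/share                  (dir: rmdir'd last, only if empty)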
+ def isowner(self,filename, destroot):
+ """
+ Check if filename is a new file or belongs to this package
+ (for this or a previous version)
+
+		@param filename: path of the file to check, relative to destroot
+		@type filename: String (Path)
+		@param destroot: ${ROOT}, prepended to filename before checking
+		@type destroot: String (Path)
+		@rtype: Boolean
+		@returns:
+		1. True if this package owns the file, or if the file does not exist yet.
+		2. False if this package does not own the file.
+ """
+ destfile = normalize_path(
+ os.path.join(destroot, filename.lstrip(os.path.sep)))
+ try:
+ mylstat = os.lstat(destfile)
+ except (OSError, IOError):
+ return True
+
+ pkgfiles = self.getcontents()
+ if pkgfiles and filename in pkgfiles:
+ return True
+ if pkgfiles:
+ if self._contents_inodes is None:
+ self._contents_inodes = set()
+ for x in pkgfiles:
+ try:
+ lstat = os.lstat(x)
+ self._contents_inodes.add((lstat.st_dev, lstat.st_ino))
+ except OSError:
+ pass
+ if (mylstat.st_dev, mylstat.st_ino) in self._contents_inodes:
+ return True
+
+ return False
+
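+	# Usage sketch (hypothetical file): isowner() is True both for paths
+	# listed verbatim in CONTENTS and for aliased paths that share an inode
+	# with a listed file:
+	#
+	#     if mylink.isowner("/usr/bin/nano", "/"):
+	#         pass   # this package may safely replace or unmerge the file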
+ def isprotected(self, filename):
+ """In cases where an installed package in the same slot owns a
+ protected file that will be merged, bump the mtime on the installed
+ file in order to ensure that it isn't unmerged."""
+ if not self._config_protect.isprotected(filename):
+ return False
+ if self._installed_instance is None:
+ return True
+ mydata = self._installed_instance.getcontents().get(filename, None)
+ if mydata is None:
+ return True
+
+ # Bump the mtime in order to ensure that the old config file doesn't
+ # get unmerged. The user will have an opportunity to merge the new
+ # config with the old one.
+ try:
+ os.utime(filename, None)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ # The file has disappeared, so it's not protected.
+ return False
+ return True
+
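+	# Sketch, assuming CONFIG_PROTECT contains "/etc": for a protected file
+	# such as /etc/nanorc owned by the installed instance, the utime() call
+	# above makes the on-disk mtime disagree with the recorded CONTENTS
+	# mtime, so _unmerge_pkgfiles() later skips it with "--- !mtime".
+	#
+	#     mylink.isprotected("/etc/nanorc")   # -> True (and mtime was bumped)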
+
+ def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
+ mydbapi=None, prev_mtimes=None):
+ """
+
+ This function does the following:
+
+ Collision Protection.
+ calls doebuild(mydo=pkg_preinst)
+ Merges the package to the livefs
+ unmerges old version (if required)
+ calls doebuild(mydo=pkg_postinst)
+ calls env_update
+
+ @param srcroot: Typically this is ${D}
+ @type srcroot: String (Path)
+ @param destroot: Path to merge to (usually ${ROOT})
+ @type destroot: String (Path)
+		@param inforoot: root of the vardb entry: the build-info directory whose files (SLOT, CFLAGS, ...) are copied into the vdb
+ @type inforoot: String (Path)
+ @param myebuild: path to the ebuild that we are processing
+ @type myebuild: String (Path)
+ @param mydbapi: dbapi which is handed to doebuild.
+ @type mydbapi: portdbapi instance
+ @param prev_mtimes: { Filename:mtime } mapping for env_update
+ @type prev_mtimes: Dictionary
+		@rtype: Integer
+ @returns:
+ 1. 0 on success
+ 2. 1 on failure
+
+ secondhand is a list of symlinks that have been skipped due to their target
+ not existing; we will merge these symlinks at a later time.
+ """
+ if not os.path.isdir(srcroot):
+ writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
+ noiselevel=-1)
+ return 1
+
+ if not os.path.exists(self.dbcatdir):
+ os.makedirs(self.dbcatdir)
+
+ otherversions = []
+ for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
+ otherversions.append(v.split("/")[1])
+
+ slot_matches = self.vartree.dbapi.match(
+ "%s:%s" % (self.mysplit[0], self.settings["SLOT"]))
+ if slot_matches:
+ # Used by self.isprotected().
+ self._installed_instance = dblink(self.cat,
+ catsplit(slot_matches[0])[1], destroot, self.settings,
+ vartree=self.vartree)
+
+ # check for package collisions
+ if "collision-protect" in self.settings.features:
+ collision_ignore = set([normalize_path(myignore) for myignore in \
+ self.settings.get("COLLISION_IGNORE", "").split()])
+ myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
+
+ # the linkcheck only works if we are in srcroot
+ mycwd = os.getcwd()
+ os.chdir(srcroot)
+ mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
+ myfilelist.extend(mysymlinks)
+ mysymlinked_directories = [s + os.path.sep for s in mysymlinks]
+ del mysymlinks
+
+ stopmerge = False
+ i=0
+
+ otherpkg=[]
+ mypkglist=[]
+
+ if self.pkg in otherversions:
+ otherversions.remove(self.pkg) # we already checked this package
+
+ myslot = self.settings["SLOT"]
+ for v in otherversions:
+ # only allow versions with same slot to overwrite files
+ if myslot == self.vartree.dbapi.aux_get("/".join((self.cat, v)), ["SLOT"])[0]:
+ mypkglist.append(
+ dblink(self.cat, v, destroot, self.settings,
+ vartree=self.vartree))
+
+ collisions = []
+
+ print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
+ for f in myfilelist:
+ nocheck = False
+				# listdir isn't intelligent enough to exclude symlinked dirs,
+				# so we have to do it ourselves
+ for s in mysymlinked_directories:
+ if f.startswith(s):
+ nocheck = True
+ break
+ if nocheck:
+ continue
+ i = i + 1
+ if i % 1000 == 0:
+ print str(i)+" files checked ..."
+ if f[0] != "/":
+ f="/"+f
+ isowned = False
+ for ver in [self] + mypkglist:
+ if (ver.isowner(f, destroot) or ver.isprotected(f)):
+ isowned = True
+ break
+ if not isowned:
+ collisions.append(f)
+ print "existing file "+f+" is not owned by this package"
+ stopmerge = True
+ if collision_ignore:
+ if f in collision_ignore:
+ stopmerge = False
+ else:
+ for myignore in collision_ignore:
+ if f.startswith(myignore + os.path.sep):
+ stopmerge = False
+ break
+ if stopmerge:
+ print red("*")+" This package is blocked because it wants to overwrite"
+ print red("*")+" files belonging to other packages (see messages above)."
+ print red("*")+" If you have no clue what this is all about report it "
+ print red("*")+" as a bug for this package on http://bugs.gentoo.org"
+ print
+ print red("package "+self.cat+"/"+self.pkg+" NOT merged")
+ print
+ print
+ print "Searching all installed packages for file collisions..."
+ print "Press Ctrl-C to Stop"
+ print
+ """ Note: The isowner calls result in a stat call for *every*
+ single installed file, since the inode numbers are used to work
+ around the problem of ambiguous paths caused by symlinked files
+ and/or directories. Though it is slow, it is as accurate as
+ possible."""
+ found_owner = False
+ for cpv in self.vartree.dbapi.cpv_all():
+ cat, pkg = catsplit(cpv)
+ mylink = dblink(cat, pkg, destroot, self.settings,
+ vartree=self.vartree)
+ mycollisions = []
+ for f in collisions:
+ if mylink.isowner(f, destroot):
+ mycollisions.append(f)
+ if mycollisions:
+ found_owner = True
+ print " * %s:" % cpv
+ print
+ for f in mycollisions:
+ print " '%s'" % \
+ os.path.join(destroot, f.lstrip(os.path.sep))
+ print
+ if not found_owner:
+ print "None of the installed packages claim the above file(s)."
+ print
+ sys.exit(1)
+ try:
+ os.chdir(mycwd)
+ except OSError:
+ pass
+
+ if os.stat(srcroot).st_dev == os.stat(destroot).st_dev:
+ """ The merge process may move files out of the image directory,
+ which causes invalidation of the .installed flag."""
+ try:
+ os.unlink(os.path.join(
+ os.path.dirname(normalize_path(srcroot)), ".installed"))
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ # get old contents info for later unmerging
+ oldcontents = self.getcontents()
+
+ self.dbdir = self.dbtmpdir
+ self.delete()
+ if not os.path.exists(self.dbtmpdir):
+ os.makedirs(self.dbtmpdir)
+
+ writemsg_stdout(">>> Merging %s %s %s\n" % (self.mycpv,"to",destroot))
+
+ # run preinst script
+ if myebuild is None:
+ myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+ a = doebuild(myebuild, "preinst", destroot, self.settings, cleanup=cleanup,
+ use_cache=0, tree=self.treetype, mydbapi=mydbapi,
+ vartree=self.vartree)
+
+ # XXX: Decide how to handle failures here.
+ if a != os.EX_OK:
+ writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
+ return a
+
+ # copy "info" files (like SLOT, CFLAGS, etc.) into the database
+ for x in listdir(inforoot):
+ self.copyfile(inforoot+"/"+x)
+
+ # get current counter value (counter_tick also takes care of incrementing it)
+ # XXX Need to make this destroot, but it needs to be initialized first. XXX
+ # XXX bis: leads to some invalidentry() call through cp_all().
+ counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
+ # write local package counter for recording
+ lcfile = open(os.path.join(self.dbtmpdir, "COUNTER"),"w")
+ lcfile.write(str(counter))
+ lcfile.close()
+
+ # open CONTENTS file (possibly overwriting old one) for recording
+ outfile = open(os.path.join(self.dbtmpdir, "CONTENTS"),"w")
+
+ self.updateprotect()
+
+ #if we have a file containing previously-merged config file md5sums, grab it.
+ conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
+ cfgfiledict = grabdict(conf_mem_file)
+ if self.settings.has_key("NOCONFMEM"):
+ cfgfiledict["IGNORE"]=1
+ else:
+ cfgfiledict["IGNORE"]=0
+
+ # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
+ mymtime = long(time.time())
+ prevmask = os.umask(0)
+ secondhand = []
+
+ # we do a first merge; this will recurse through all files in our srcroot but also build up a
+ # "second hand" of symlinks to merge later
+ if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
+ return 1
+
+		# now it's time to deal with our second hand; we'll loop until we can't merge anymore. The rest are
+ # broken symlinks. We'll merge them too.
+ lastlen = 0
+ while len(secondhand) and len(secondhand)!=lastlen:
+ # clear the thirdhand. Anything from our second hand that
+ # couldn't get merged will be added to thirdhand.
+
+ thirdhand = []
+ self.mergeme(srcroot, destroot, outfile, thirdhand, secondhand, cfgfiledict, mymtime)
+
+ #swap hands
+ lastlen = len(secondhand)
+
+ # our thirdhand now becomes our secondhand. It's ok to throw
+ # away secondhand since thirdhand contains all the stuff that
+ # couldn't be merged.
+ secondhand = thirdhand
+
+ if len(secondhand):
+ # force merge of remaining symlinks (broken or circular; oh well)
+ self.mergeme(srcroot, destroot, outfile, None, secondhand, cfgfiledict, mymtime)
+
+ #restore umask
+ os.umask(prevmask)
+
+		# flush and close the CONTENTS file
+ outfile.flush()
+ outfile.close()
+
+ if os.path.exists(self.dbpkgdir):
+ writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
+ self.dbdir = self.dbpkgdir
+ self.unmerge(oldcontents, trimworld=0, ldpath_mtimes=prev_mtimes)
+ self.dbdir = self.dbtmpdir
+ writemsg_stdout(">>> Original instance of package unmerged safely.\n")
+
+ # We hold both directory locks.
+ self.dbdir = self.dbpkgdir
+ self.delete()
+ movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+ contents = self.getcontents()
+
+ #write out our collection of md5sums
+ if cfgfiledict.has_key("IGNORE"):
+ del cfgfiledict["IGNORE"]
+
+ my_private_path = os.path.join(destroot, PRIVATE_PATH)
+ if not os.path.exists(my_private_path):
+ os.makedirs(my_private_path)
+ os.chown(my_private_path, os.getuid(), portage_gid)
+ os.chmod(my_private_path, 02770)
+
+ writedict(cfgfiledict, conf_mem_file)
+ del conf_mem_file
+
+ #do postinst script
+ a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
+ tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
+
+ # XXX: Decide how to handle failures here.
+ if a != os.EX_OK:
+ writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
+ return a
+
+ downgrade = False
+ for v in otherversions:
+ if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
+ downgrade = True
+
+ #update environment settings, library paths. DO NOT change symlinks.
+ env_update(makelinks=(not downgrade),
+ target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
+ contents=contents)
+ #dircache may break autoclean because it remembers the -MERGING-pkg file
+ global dircache
+ if dircache.has_key(self.dbcatdir):
+ del dircache[self.dbcatdir]
+ writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
+
+ # Process ebuild logfiles
+ elog_process(self.mycpv, self.settings)
+ if "noclean" not in self.settings.features:
+ doebuild(myebuild, "clean", destroot, self.settings,
+ tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
+ return os.EX_OK
+
+ def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
+ """
+
+ This function handles actual merging of the package contents to the livefs.
+ It also handles config protection.
+
+ @param srcroot: Where are we copying files from (usually ${D})
+ @type srcroot: String (Path)
+ @param destroot: Typically ${ROOT}
+ @type destroot: String (Path)
+ @param outfile: File to log operations to
+ @type outfile: File Object
+		@param secondhand: A list of items to merge in pass two (usually
+		symlinks whose targets do not exist yet and may get merged later)
+ @type secondhand: List
+		@param stufftomerge: Either a directory to merge, or a list of items.
+ @type stufftomerge: String or List
+ @param cfgfiledict: { File:mtime } mapping for config_protected files
+ @type cfgfiledict: Dictionary
+		@param thismtime: The current time (typically long(time.time()))
+ @type thismtime: Long
+ @rtype: None or Boolean
+ @returns:
+ 1. True on failure
+ 2. None otherwise
+
+ """
+ from os.path import sep, join
+ srcroot = normalize_path(srcroot).rstrip(sep) + sep
+ destroot = normalize_path(destroot).rstrip(sep) + sep
+
+		# this merges a list of files; the stufftomerge argument may be passed in two forms.
+ if type(stufftomerge) == types.StringType:
+ #A directory is specified. Figure out protection paths, listdir() it and process it.
+ mergelist = listdir(join(srcroot, stufftomerge))
+ offset = stufftomerge
+ else:
+ mergelist = stufftomerge
+ offset = ""
+ for x in mergelist:
+ mysrc = join(srcroot, offset, x)
+ mydest = join(destroot, offset, x)
+ # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
+ myrealdest = join(sep, offset, x)
+ # stat file once, test using S_* macros many times (faster that way)
+ try:
+ mystat = os.lstat(mysrc)
+ except OSError, e:
+ writemsg("\n")
+ writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
+ writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
+ writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
+ writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
+ writemsg(red("!!! File: ")+str(mysrc)+"\n", noiselevel=-1)
+ writemsg(red("!!! Error: ")+str(e)+"\n", noiselevel=-1)
+ sys.exit(1)
+ except Exception, e:
+ writemsg("\n")
+ writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
+ writemsg(red("!!! A stat call returned the following error for the following file:"))
+ writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
+ writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
+ writemsg( "!!! File: "+str(mysrc)+"\n", noiselevel=-1)
+ writemsg( "!!! Error: "+str(e)+"\n", noiselevel=-1)
+ sys.exit(1)
+
+
+ mymode = mystat[stat.ST_MODE]
+ # handy variables; mydest is the target object on the live filesystems;
+ # mysrc is the source object in the temporary install dir
+ try:
+ mydmode = os.lstat(mydest).st_mode
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ #dest file doesn't exist
+ mydmode = None
+
+ if stat.S_ISLNK(mymode):
+ # we are merging a symbolic link
+ myabsto = abssymlink(mysrc)
+ if myabsto.startswith(srcroot):
+ myabsto = myabsto[len(srcroot):]
+ myabsto = myabsto.lstrip(sep)
+ myto = os.readlink(mysrc)
+ if self.settings and self.settings["D"]:
+ if myto.startswith(self.settings["D"]):
+ myto = myto[len(self.settings["D"]):]
+ # myrealto contains the path of the real file to which this symlink points.
+ # we can simply test for existence of this file to see if the target has been merged yet
+ myrealto = normalize_path(os.path.join(destroot, myabsto))
+ if mydmode!=None:
+ #destination exists
+ if not stat.S_ISLNK(mydmode):
+ if stat.S_ISDIR(mydmode):
+ # directory in the way: we can't merge a symlink over a directory
+ # we won't merge this, continue with next file...
+ continue
+
+ if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
+ # Kill file blocking installation of symlink to dir #71787
+ pass
+ elif self.isprotected(mydest):
+ # Use md5 of the target in ${D} if it exists...
+ try:
+ newmd5 = perform_md5(join(srcroot, myabsto))
+ except FileNotFound:
+ # Maybe the target is merged already.
+ try:
+ newmd5 = perform_md5(myrealto)
+ except FileNotFound:
+ newmd5 = None
+ mydest = new_protect_filename(mydest, newmd5=newmd5)
+
+ # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
+ if (secondhand != None) and (not os.path.exists(myrealto)):
+ # either the target directory doesn't exist yet or the target file doesn't exist -- or
+ # the target is a broken symlink. We will add this file to our "second hand" and merge
+ # it later.
+ secondhand.append(mysrc[len(srcroot):])
+ continue
+ # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
+ mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
+ if mymtime != None:
+ writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+ else:
+ print "!!! Failed to move file."
+ print "!!!", mydest, "->", myto
+ sys.exit(1)
+ elif stat.S_ISDIR(mymode):
+ # we are merging a directory
+ if mydmode != None:
+ # destination exists
+
+ if bsd_chflags:
+ # Save then clear flags on dest.
+ dflags = os.lstat(mydest).st_flags
+ if dflags != 0:
+ bsd_chflags.lchflags(mydest, 0)
+
+ if not os.access(mydest, os.W_OK):
+ pkgstuff = pkgsplit(self.pkg)
+ writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
+ writemsg("!!! Please check permissions and directories for broken symlinks.\n")
+ writemsg("!!! You may start the merge process again by using ebuild:\n")
+ writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
+ writemsg("!!! And finish by running this: env-update\n\n")
+ return 1
+
+ if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
+ # a symlink to an existing directory will work for us; keep it:
+ writemsg_stdout("--- %s/\n" % mydest)
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ else:
+ # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
+ if movefile(mydest, mydest+".backup", mysettings=self.settings) is None:
+ sys.exit(1)
+ print "bak", mydest, mydest+".backup"
+ #now create our directory
+ if self.settings.selinux_enabled():
+ import selinux
+ sid = selinux.get_sid(mysrc)
+ selinux.secure_mkdir(mydest,sid)
+ else:
+ os.mkdir(mydest)
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ os.chmod(mydest, mystat[0])
+ os.chown(mydest, mystat[4], mystat[5])
+ writemsg_stdout(">>> %s/\n" % mydest)
+ else:
+ #destination doesn't exist
+ if self.settings.selinux_enabled():
+ import selinux
+ sid = selinux.get_sid(mysrc)
+ selinux.secure_mkdir(mydest, sid)
+ else:
+ os.mkdir(mydest)
+ os.chmod(mydest, mystat[0])
+ os.chown(mydest, mystat[4], mystat[5])
+ writemsg_stdout(">>> %s/\n" % mydest)
+ outfile.write("dir "+myrealdest+"\n")
+ # recurse and merge this directory
+ if self.mergeme(srcroot, destroot, outfile, secondhand,
+ join(offset, x), cfgfiledict, thismtime):
+ return 1
+ elif stat.S_ISREG(mymode):
+ # we are merging a regular file
+ mymd5 = perform_md5(mysrc, calc_prelink=1)
+ # calculate config file protection stuff
+ mydestdir = os.path.dirname(mydest)
+ moveme = 1
+ zing = "!!!"
+ if mydmode != None:
+ # destination file exists
+ if stat.S_ISDIR(mydmode):
+ # install of destination is blocked by an existing directory with the same name
+ moveme = 0
+ writemsg_stdout("!!! %s\n" % mydest)
+ elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
+ cfgprot = 0
+ # install of destination is blocked by an existing regular file,
+ # or by a symlink to an existing regular file;
+ # now, config file management may come into play.
+ # we only need to tweak mydest if cfg file management is in play.
+ if self.isprotected(mydest):
+ # we have a protection path; enable config file management.
+ destmd5 = perform_md5(mydest, calc_prelink=1)
+ if mymd5 == destmd5:
+ #file already in place; simply update mtimes of destination
+ os.utime(mydest, (thismtime, thismtime))
+ zing = "---"
+ moveme = 0
+ else:
+ if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
+ """ An identical update has previously been
+ merged. Skip it unless the user has chosen
+ --noconfmem."""
+ zing = "-o-"
+ moveme = cfgfiledict["IGNORE"]
+ cfgprot = cfgfiledict["IGNORE"]
+ else:
+ moveme = 1
+ cfgprot = 1
+ if moveme:
+ # Merging a new file, so update confmem.
+ cfgfiledict[myrealdest] = [mymd5]
+ elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
+ """A previously remembered update has been
+ accepted, so it is removed from confmem."""
+ del cfgfiledict[myrealdest]
+ if cfgprot:
+ mydest = new_protect_filename(mydest, newmd5=mymd5)
+
+ # whether config protection or not, we merge the new file the
+ # same way. Unless moveme=0 (blocking directory)
+ if moveme:
+ mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
+ if mymtime is None:
+ sys.exit(1)
+ zing = ">>>"
+ else:
+ mymtime = thismtime
+ # We need to touch the destination so that on --update the
+ # old package won't yank the file with it. (non-cfgprot related)
+ os.utime(mydest, (thismtime,thismtime))
+ zing = "---"
+ if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
+
+ # XXX kludge, can be killed when portage stops relying on
+ # md5+mtime, and uses refcounts
+					# alright, we've altered the mtime on the file; this breaks static archives:
+					# the internal mtime != the file's mtime, so the linker (falsely) thinks
+					# the archive is stale and needs its TOC rebuilt.
+
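+					# Layout assumed here (standard ar format): after the 8-byte
+					# "!<arch>\n" global header, each member has a 60-byte header of
+					# name[16] mtime[12] uid[6] gid[6] mode[8] size[10] magic[2],
+					# which explains the seek(16), the 12-byte write, the seek(20)
+					# over uid+gid+mode, and the 10-byte size read below.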
+ myf = open(mydest, "r+")
+
+ # ar mtime field is digits padded with spaces, 12 bytes.
+ lms = str(thismtime+5).ljust(12)
+ myf.seek(0)
+ magic = myf.read(8)
+ if magic != "!<arch>\n":
+						# not an archive (dolib.a from portage.py makes it here, for example)
+ myf.close()
+ else:
+ st = os.stat(mydest)
+ while myf.tell() < st.st_size - 12:
+ # skip object name
+ myf.seek(16, 1)
+
+ # update mtime
+ myf.write(lms)
+
+ # skip uid/gid/mperm
+ myf.seek(20, 1)
+
+ # read the archive member's size
+ x = long(myf.read(10))
+
+ # skip the trailing newlines, and add the potential
+ # extra padding byte if it's not an even size
+ myf.seek(x + 2 + (x % 2),1)
+
+ # and now we're at the end. yay.
+ myf.close()
+ mymd5 = perform_md5(mydest, calc_prelink=1)
+ os.utime(mydest, (thismtime, thismtime))
+
+ if mymtime != None:
+ zing = ">>>"
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+ writemsg_stdout("%s %s\n" % (zing,mydest))
+ else:
+ # we are merging a fifo or device node
+ zing = "!!!"
+ if mydmode is None:
+ # destination doesn't exist
+ if movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings) != None:
+ zing = ">>>"
+ else:
+ sys.exit(1)
+ if stat.S_ISFIFO(mymode):
+ outfile.write("fif %s\n" % myrealdest)
+ else:
+ outfile.write("dev %s\n" % myrealdest)
+ writemsg_stdout(zing + " " + mydest + "\n")
+
+ def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
+ mydbapi=None, prev_mtimes=None):
+ try:
+ self.lockdb()
+ return self.treewalk(mergeroot, myroot, inforoot, myebuild,
+ cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
+ finally:
+ self.unlockdb()
+
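+	# Top-level usage sketch (hypothetical paths; mergeroot is normally ${D}
+	# and myroot is ${ROOT}):
+	#
+	#     rval = mylink.merge(mergeroot, inforoot, "/", myebuild=myebuild,
+	#         mydbapi=mydbapi)
+	#     if rval != os.EX_OK:
+	#         pass   # a phase failed; rval is its return code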
+ def getstring(self,name):
+ "returns contents of a file with whitespace converted to spaces"
+ if not os.path.exists(self.dbdir+"/"+name):
+ return ""
+ myfile = open(self.dbdir+"/"+name,"r")
+ mydata = myfile.read().split()
+ myfile.close()
+ return " ".join(mydata)
+
+ def copyfile(self,fname):
+ import shutil
+ shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
+
+ def getfile(self,fname):
+ if not os.path.exists(self.dbdir+"/"+fname):
+ return ""
+ myfile = open(self.dbdir+"/"+fname,"r")
+ mydata = myfile.read()
+ myfile.close()
+ return mydata
+
+ def setfile(self,fname,data):
+ write_atomic(os.path.join(self.dbdir, fname), data)
+
+ def getelements(self,ename):
+ if not os.path.exists(self.dbdir+"/"+ename):
+ return []
+ myelement = open(self.dbdir+"/"+ename,"r")
+ mylines = myelement.readlines()
+ myreturn = []
+ for x in mylines:
+ for y in x[:-1].split():
+ myreturn.append(y)
+ myelement.close()
+ return myreturn
+
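+	# Element files are whitespace-separated token lists; e.g. a hypothetical
+	# USE file containing "ncurses nls\nunicode\n" yields
+	# ["ncurses", "nls", "unicode"].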
+ def setelements(self,mylist,ename):
+ myelement = open(self.dbdir+"/"+ename,"w")
+ for x in mylist:
+ myelement.write(x+"\n")
+ myelement.close()
+
+ def isregular(self):
+ "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
+ return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
diff --git a/pym/portage/dbapi/virtual.py b/pym/portage/dbapi/virtual.py
new file mode 100644
index 000000000..0becd1e6c
--- /dev/null
+++ b/pym/portage/dbapi/virtual.py
@@ -0,0 +1,89 @@
+from portage import cpv_getkey
+from portage.dbapi import dbapi
+
+class fakedbapi(dbapi):
+ "This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
+ def __init__(self, settings=None):
+ self.cpvdict = {}
+ self.cpdict = {}
+ if settings is None:
+ from portage import settings
+ self.settings = settings
+ self._match_cache = {}
+
+ def _clear_cache(self):
+ if self._match_cache:
+ self._match_cache = {}
+
+ def match(self, origdep, use_cache=1):
+ result = self._match_cache.get(origdep, None)
+ if result is not None:
+ return result[:]
+ result = dbapi.match(self, origdep, use_cache=use_cache)
+ self._match_cache[origdep] = result
+ return result[:]
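+		# Note: result[:] returns a copy so that callers may mutate the list
+		# they receive without corrupting the cache.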
+
+ def cpv_exists(self, mycpv):
+ return self.cpvdict.has_key(mycpv)
+
+ def cp_list(self, mycp, use_cache=1):
+ if not self.cpdict.has_key(mycp):
+ return []
+ else:
+ return self.cpdict[mycp]
+
+ def cp_all(self):
+ returnme=[]
+ for x in self.cpdict.keys():
+ returnme.extend(self.cpdict[x])
+ return returnme
+
+ def cpv_all(self):
+ return self.cpvdict.keys()
+
+ def cpv_inject(self, mycpv, metadata=None):
+ """Adds a cpv from the list of available packages."""
+ self._clear_cache()
+ mycp = cpv_getkey(mycpv)
+ self.cpvdict[mycpv] = metadata
+ myslot = None
+ if metadata:
+ myslot = metadata.get("SLOT", None)
+ if myslot and mycp in self.cpdict:
+ # If necessary, remove another package in the same SLOT.
+ for cpv in self.cpdict[mycp]:
+ if mycpv != cpv:
+ other_metadata = self.cpvdict[cpv]
+ if other_metadata:
+ if myslot == other_metadata.get("SLOT", None):
+ self.cpv_remove(cpv)
+ break
+ if mycp not in self.cpdict:
+ self.cpdict[mycp] = []
+		if mycpv not in self.cpdict[mycp]:
+ self.cpdict[mycp].append(mycpv)
+
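+	# Sketch of the SLOT handling above (hypothetical versions): injecting
+	# two cpvs of the same package and SLOT keeps only the newest injection:
+	#
+	#     fakedb = fakedbapi()
+	#     fakedb.cpv_inject("app-editors/nano-2.0.1", metadata={"SLOT": "0"})
+	#     fakedb.cpv_inject("app-editors/nano-2.0.2", metadata={"SLOT": "0"})
+	#     fakedb.cp_list("app-editors/nano")   # -> ["app-editors/nano-2.0.2"]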
+ def cpv_remove(self,mycpv):
+ """Removes a cpv from the list of available packages."""
+ self._clear_cache()
+ mycp = cpv_getkey(mycpv)
+ if self.cpvdict.has_key(mycpv):
+ del self.cpvdict[mycpv]
+ if not self.cpdict.has_key(mycp):
+ return
+ while mycpv in self.cpdict[mycp]:
+ del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
+ if not len(self.cpdict[mycp]):
+ del self.cpdict[mycp]
+
+ def aux_get(self, mycpv, wants):
+ if not self.cpv_exists(mycpv):
+ raise KeyError(mycpv)
+ metadata = self.cpvdict[mycpv]
+ if not metadata:
+ return ["" for x in wants]
+ return [metadata.get(x, "") for x in wants]
+
+ def aux_update(self, cpv, values):
+ self._clear_cache()
+ self.cpvdict[cpv].update(values)