 doc/dependency_resolution/task_scheduling.docbook |  12
 pym/_emerge/__init__.py                           | 202
 pym/portage/__init__.py                           |   9
 pym/portage/dbapi/vartree.py                      |  54
 4 files changed, 233 insertions(+), 44 deletions(-)
diff --git a/doc/dependency_resolution/task_scheduling.docbook b/doc/dependency_resolution/task_scheduling.docbook
index f879980cd..01953128e 100644
--- a/doc/dependency_resolution/task_scheduling.docbook
+++ b/doc/dependency_resolution/task_scheduling.docbook
@@ -21,7 +21,7 @@
 	</para>
 	<para>
 	In order to avoid a conflict, a package may need to be uninstalled
-	in advance, rather than through replacement. The following constraints
+	rather than replaced. The following constraints
 	protect inappropriate packages from being chosen for automatic
 	uninstallation:
 	<itemizedlist>
@@ -46,6 +46,16 @@
 		</listitem>
 	</itemizedlist>
 	</para>
+	<para>
+	In order to ensure that package files remain installed in a usable state
+	whenever possible, uninstallation operations are not executed
+	until after all associated conflicting packages have been installed.
+	When file collisions occur between conflicting packages, the contents
+	entries for those files are removed from the packages
+	that are scheduled for uninstallation. This prevents
+	uninstallation operations from removing overlapping files that
+	have been claimed by conflicting packages.
+	</para>
 </sect1>
 <sect1 id='dependency-resolution-task-scheduling-circular-dependencies'>
 	<title>Circular Dependencies</title>
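The new DocBook paragraph summarizes the mechanism the code changes below implement: uninstalls are deferred until every conflicting package has been merged, and CONTENTS entries for overlapping files are dropped from packages awaiting uninstall. A minimal sketch of that pruning rule, with plain dicts standing in for vardb CONTENTS data and a hypothetical prune_claimed_files() helper:

    # Hypothetical illustration of the CONTENTS-pruning rule described
    # above: files claimed by the newly merged package are removed from
    # the CONTENTS of a package scheduled for uninstallation, so the
    # later unmerge will not touch them.
    def prune_claimed_files(uninstall_contents, claimed_files):
        """Return CONTENTS entries minus any paths now owned elsewhere."""
        return dict((path, entry)
            for path, entry in uninstall_contents.items()
            if path not in claimed_files)

    old_contents = {
        "/usr/bin/tool": ("obj", "1210000000", "d41d8cd9..."),
        "/usr/share/doc/tool/README": ("obj", "1210000000", "abc123"),
    }
    claimed = set(["/usr/bin/tool"])  # collision claimed by the new package
    print(prune_claimed_files(old_contents, claimed))
    # -> only the README entry remains for the pending uninstall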
diff --git a/pym/_emerge/__init__.py b/pym/_emerge/__init__.py
index cc236633b..a1933d6c1 100644
--- a/pym/_emerge/__init__.py
+++ b/pym/_emerge/__init__.py
@@ -729,7 +729,6 @@ class search(object):
 			result = ""
 		return result
 
-
 class RootConfig(object):
 	"""This is used internally by depgraph to track information about a
 	particular $ROOT."""
@@ -1273,13 +1272,22 @@ class Package(Task):
 	__slots__ = ("built", "cpv", "depth",
 		"installed", "metadata", "onlydeps", "operation",
 		"root", "type_name",
-		"cp", "cpv_slot", "pv_split", "slot_atom")
+		"category", "cp", "cpv_slot", "pf", "pv_split", "slot_atom")
+
+	metadata_keys = [
+		"CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
+		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
+		"repository", "RESTRICT", "SLOT", "USE"]
+
 	def __init__(self, **kwargs):
 		Task.__init__(self, **kwargs)
 		self.cp = portage.cpv_getkey(self.cpv)
 		self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"])
 		self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"])
-		self.pv_split = portage.catpkgsplit(self.cpv)[1:]
+		cpv_parts = portage.catpkgsplit(self.cpv)
+		self.category = cpv_parts[0]
+		self.pv_split = cpv_parts[1:]
+		self.pf = self.cpv.replace(self.category + "/", "", 1)
 
 	def _get_hash_key(self):
 		hash_key = getattr(self, "_hash_key", None)
@@ -1366,6 +1374,9 @@
 		2) the old-style virtuals have changed
 	"""
 	class BlockerData(object):
+
+		__slots__ = ("__weakref__", "atoms", "counter")
+
 		def __init__(self, counter, atoms):
 			self.counter = counter
 			self.atoms = atoms
@@ -1507,6 +1518,84 @@
 			an AttributeError."""
 		return list(self)
 
+class BlockerDB(object):
+
+	def __init__(self, vartree, portdb):
+		self._vartree = vartree
+		self._portdb = portdb
+		self._blocker_cache = \
+			BlockerCache(self._vartree.root, vartree.dbapi)
+		self._dep_check_trees = { self._vartree.root : {
+			"porttree" : self._vartree,
+			"vartree" : self._vartree,
+		}}
+		self._installed_pkgs = None
+
+	def findInstalledBlockers(self, new_pkg):
+		self._update_cache()
+		blocker_parents = digraph()
+		blocker_atoms = []
+		for pkg in self._installed_pkgs:
+			for blocker_atom in self._blocker_cache[pkg.cpv].atoms:
+				blocker_atom = blocker_atom[1:]
+				blocker_atoms.append(blocker_atom)
+				blocker_parents.add(blocker_atom, pkg)
+
+		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+		blocking_pkgs = set()
+		for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
+			blocking_pkgs.update(blocker_parents.parent_nodes(atom))
+		return blocking_pkgs
+
+	def _update_cache(self):
+		blocker_cache = self._blocker_cache
+		dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+		dep_check_trees = self._dep_check_trees
+		settings = self._vartree.settings
+		stale_cache = set(blocker_cache)
+		fake_vartree = \
+			FakeVartree(self._vartree,
+				self._portdb, Package.metadata_keys, {})
+		vardb = fake_vartree.dbapi
+		self._installed_pkgs = list(vardb)
+
+		for inst_pkg in self._installed_pkgs:
+			stale_cache.discard(inst_pkg.cpv)
+			cached_blockers = blocker_cache.get(inst_pkg.cpv)
+			if cached_blockers is not None and \
+				cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
+				cached_blockers = None
+			if cached_blockers is not None:
+				blocker_atoms = cached_blockers.atoms
+			else:
+				myuse = inst_pkg.metadata["USE"].split()
+				# Use aux_get() to trigger FakeVartree global
+				# updates on *DEPEND when appropriate.
+				depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
+				try:
+					portage.dep._dep_check_strict = False
+					success, atoms = portage.dep_check(depstr,
+						vardb, settings, myuse=myuse,
+						trees=dep_check_trees, myroot=inst_pkg.root)
+				finally:
+					portage.dep._dep_check_strict = True
+				if not success:
+					pkg_location = os.path.join(inst_pkg.root,
+						portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
+					portage.writemsg("!!! %s/*DEPEND: %s\n" % \
+						(pkg_location, atoms), noiselevel=-1)
+					continue
+
+				blocker_atoms = [atom for atom in atoms \
+					if atom.startswith("!")]
+				blocker_atoms.sort()
+				counter = long(inst_pkg.metadata["COUNTER"])
+				blocker_cache[inst_pkg.cpv] = \
+					blocker_cache.BlockerData(counter, blocker_atoms)
+		for cpv in stale_cache:
+			del blocker_cache[cpv]
+		blocker_cache.flush()
+
 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
 
 	from formatter import AbstractFormatter, DumbWriter
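BlockerDB caches each installed package's blocker atoms (keyed by COUNTER so stale entries are refreshed) and resolves them against a candidate package. A toy sketch of the reverse atom-to-package map that findInstalledBlockers() builds, with plain dicts in place of portage's digraph and InternalPackageSet, and matching reduced to exact category/package equality:

    from collections import defaultdict

    # Installed packages and their dependency atoms; "!" marks a blocker.
    installed = {
        "sys-apps/old-tool-1.0": ["!sys-apps/new-tool"],
        "app-misc/other-2.3": [">=dev-libs/foo-1"],
    }

    # Map each blocker atom (with "!" stripped, as in the code above)
    # back to the installed packages that declare it.
    blocker_parents = defaultdict(set)
    for cpv, atoms in installed.items():
        for atom in atoms:
            if atom.startswith("!"):
                blocker_parents[atom[1:]].add(cpv)

    # A new package is blocked by whichever installed packages declare a
    # matching atom; real matching uses InternalPackageSet, here reduced
    # to an exact dict lookup.
    new_pkg_cp = "sys-apps/new-tool"
    print(blocker_parents.get(new_pkg_cp, set()))
    # -> set(['sys-apps/old-tool-1.0'])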
@@ -1660,10 +1749,7 @@
 		"binary":"bintree",
 		"installed":"vartree"}
 
-	_mydbapi_keys = [
-		"CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
-		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
-		"repository", "RESTRICT", "SLOT", "USE"]
+	_mydbapi_keys = Package.metadata_keys
 
 	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
 
@@ -3484,6 +3570,9 @@
 			return -1
 		myblocker_uninstalls = self._blocker_uninstalls.copy()
 		retlist=[]
+		# Contains uninstall tasks that have been scheduled to
+		# occur after overlapping blockers have been installed.
+		scheduled_uninstalls = set()
 		# Contains any Uninstall tasks that have been ignored
 		# in order to avoid the circular deps code path. These
 		# correspond to blocker conflicts that could not be
@@ -3698,10 +3787,16 @@
 				selected_nodes = list(selected_nodes)
 				selected_nodes.sort(cmp_circular_bias)
 
+			if not selected_nodes and scheduled_uninstalls:
+				selected_nodes = set()
+				for node in scheduled_uninstalls:
+					if not mygraph.child_nodes(node):
+						selected_nodes.add(node)
+				scheduled_uninstalls.difference_update(selected_nodes)
+
 			if not selected_nodes and not myblocker_uninstalls.is_empty():
 				# An Uninstall task needs to be executed in order to
 				# avoid conflict if possible.
-
 				min_parent_deps = None
 				uninst_task = None
 				for task in myblocker_uninstalls.leaf_nodes():
@@ -3819,7 +3914,20 @@
 							uninst_task = task
 
 				if uninst_task is not None:
-					selected_nodes = [uninst_task]
+					# The uninstall is performed only after blocking
+					# packages have been merged on top of it. File
+					# collisions between blocking packages are detected
+					# and removed from the list of files to be uninstalled.
+					scheduled_uninstalls.add(uninst_task)
+					parent_nodes = mygraph.parent_nodes(uninst_task)
+
+					# Reverse the parent -> uninstall edges since we want
+					# to do the uninstall after blocking packages have
+					# been merged on top of it.
+					mygraph.remove(uninst_task)
+					for blocked_pkg in parent_nodes:
+						mygraph.add(blocked_pkg, uninst_task,
+							priority=BlockerDepPriority.instance)
 				else:
 					# None of the Uninstall tasks are acceptable, so
 					# the corresponding blockers are unresolvable.
@@ -3836,12 +3944,12 @@
 							ignored_uninstall_tasks.add(node)
 							break
 
-					# After dropping an Uninstall task, reset
-					# the state variables for leaf node selection and
-					# continue trying to select leaf nodes.
-					prefer_asap = True
-					accept_root_node = False
-					continue
+				# After dropping an Uninstall task, reset
+				# the state variables for leaf node selection and
+				# continue trying to select leaf nodes.
+				prefer_asap = True
+				accept_root_node = False
+				continue
 
 			if not selected_nodes:
 				self._circular_deps_for_display = mygraph
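The hunks above defer each chosen uninstall: instead of selecting it immediately, the resolver records it in scheduled_uninstalls and reverses the parent-to-uninstall edges, so the uninstall only becomes a ready leaf once every blocking package has been merged on top of it. A minimal sketch of that reversal, using a hypothetical TinyGraph in place of portage's digraph:

    # Toy stand-in for portage's digraph, just enough to show the
    # edge reversal; add(child, parent) registers a dependency edge.
    class TinyGraph:
        def __init__(self):
            self.children = {}  # node -> set of child nodes
            self.parents = {}   # node -> set of parent nodes

        def add(self, child, parent):
            self.children.setdefault(parent, set()).add(child)
            self.parents.setdefault(child, set()).add(parent)
            self.children.setdefault(child, set())
            self.parents.setdefault(parent, set())

        def remove(self, node):
            for p in self.parents.pop(node, set()):
                self.children[p].discard(node)
            for c in self.children.pop(node, set()):
                self.parents[c].discard(node)

        def child_nodes(self, node):
            return list(self.children.get(node, ()))

        def parent_nodes(self, node):
            return list(self.parents.get(node, ()))

    g = TinyGraph()
    # Initially the blocked package is a parent of the uninstall task:
    g.add("uninstall old-pkg", "merge new-pkg")
    parents = g.parent_nodes("uninstall old-pkg")
    g.remove("uninstall old-pkg")
    for blocked in parents:  # reverse the edges, as in the hunk above
        g.add(blocked, "uninstall old-pkg")
    # The uninstall now has new-pkg as a child and only becomes ready
    # (no remaining children) after new-pkg has been merged.
    print(g.child_nodes("uninstall old-pkg"))  # ['merge new-pkg']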
@@ -4002,6 +4110,8 @@
 		verbosity = ("--quiet" in self.myopts and 1 or \
 			"--verbose" in self.myopts and 3 or 2)
 		favorites_set = InternalPackageSet(favorites)
+		oneshot = "--oneshot" in self.myopts or \
+			"--onlydeps" in self.myopts
 		changelogs=[]
 		p=[]
 		blockers = []
@@ -4558,7 +4668,8 @@
 			try:
 				pkg_system = system_set.findAtomForPackage(pkg_key, metadata)
 				pkg_world = world_set.findAtomForPackage(pkg_key, metadata)
-				if not pkg_world and myroot == self.target_root and \
+				if not (oneshot or pkg_world) and \
+					myroot == self.target_root and \
 					favorites_set.findAtomForPackage(pkg_key, metadata):
 					# Maybe it will be added to world now.
 					if create_world_atom(pkg_key, metadata,
@@ -5368,12 +5479,35 @@
 		if settings.get("PORTAGE_DEBUG", "") == "1":
 			self.edebug = 1
 		self.pkgsettings = {}
+		self._blocker_db = {}
 		for root in trees:
 			self.pkgsettings[root] = portage.config(
 				clone=trees[root]["vartree"].settings)
+			self._blocker_db[root] = BlockerDB(
+				trees[root]["vartree"],
+				trees[root]["porttree"].dbapi)
 		self.curval = 0
 		self._spawned_pids = []
-		self._uninstall_queue = []
+
+	def _find_blockers(self, new_pkg):
+		for opt in ("--buildpkgonly", "--nodeps",
+			"--fetchonly", "--fetch-all-uri", "--pretend"):
+			if opt in self.myopts:
+				return None
+
+		blocker_dblinks = []
+		for blocking_pkg in self._blocker_db[
+			new_pkg.root].findInstalledBlockers(new_pkg):
+			if new_pkg.slot_atom == blocking_pkg.slot_atom:
+				continue
+			if new_pkg.cpv == blocking_pkg.cpv:
+				continue
+			blocker_dblinks.append(portage.dblink(
+				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
+				self.pkgsettings[blocking_pkg.root], treetype="vartree",
+				vartree=self.trees[blocking_pkg.root]["vartree"]))
+
+		return blocker_dblinks
 
 	def merge(self, mylist, favorites, mtimedb):
 		try:
@@ -5403,17 +5537,6 @@
 				pass
 			spawned_pids.remove(pid)
 
-	def _dequeue_uninstall_tasks(self, mtimedb):
-		if not self._uninstall_queue:
-			return
-		for uninst_task in self._uninstall_queue:
-			root_config = self.trees[uninst_task.root]["root_config"]
-			unmerge(root_config, self.myopts, "unmerge",
-				[uninst_task.cpv], mtimedb["ldpath"], clean_world=0)
-			del mtimedb["resume"]["mergelist"][0]
-			mtimedb.commit()
-		del self._uninstall_queue[:]
-
 	def _merge(self, mylist, favorites, mtimedb):
 		from portage.elog import elog_process
 		from portage.elog.filtering import filter_mergephases
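_find_blockers() consults the per-root BlockerDB just before each merge and turns blocking installed packages into dblink objects for the collision check, skipping the package being replaced in its own slot as well as identical versions. A sketch of just that filtering rule, with namedtuples standing in for _emerge's Package objects:

    from collections import namedtuple

    Pkg = namedtuple("Pkg", "cpv slot_atom")

    def filter_blockers(new_pkg, candidates):
        blockers = []
        for pkg in candidates:
            if pkg.slot_atom == new_pkg.slot_atom:
                continue  # in-slot replacement, handled by normal merge
            if pkg.cpv == new_pkg.cpv:
                continue  # identical version, not a real conflict
            blockers.append(pkg)
        return blockers

    new = Pkg("sys-apps/tool-2.0", "sys-apps/tool:0")
    installed = [
        Pkg("sys-apps/tool-1.0", "sys-apps/tool:0"),  # same slot
        Pkg("sys-apps/tool-compat-1.0", "sys-apps/tool-compat:0"),
    ]
    print([p.cpv for p in filter_blockers(new, installed)])
    # -> ['sys-apps/tool-compat-1.0']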
@@ -5554,7 +5677,10 @@
 			metadata = pkg.metadata
 			if pkg.installed:
 				if not (buildpkgonly or fetchonly or pretend):
-					self._uninstall_queue.append(pkg)
+					unmerge(root_config, self.myopts, "unmerge",
+						[pkg.cpv], mtimedb["ldpath"], clean_world=0)
+					del mtimedb["resume"]["mergelist"][0]
+					mtimedb.commit()
 				continue
 
 			if x[0]=="blocks":
@@ -5655,20 +5781,22 @@
 						return retval
 					bintree = self.trees[myroot]["bintree"]
 					bintree.inject(pkg_key, filename=binpkg_tmpfile)
-					self._dequeue_uninstall_tasks(mtimedb)
+
 					if "--buildpkgonly" not in self.myopts:
 						msg = " === (%s of %s) Merging (%s::%s)" % \
 							(mergecount, len(mymergelist), pkg_key, y)
 						short_msg = "emerge: (%s of %s) %s Merge" % \
 							(mergecount, len(mymergelist), pkg_key)
 						emergelog(xterm_titles, msg, short_msg=short_msg)
+
 						retval = portage.merge(pkgsettings["CATEGORY"],
 							pkgsettings["PF"], pkgsettings["D"],
 							os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
 							"build-info"), myroot, pkgsettings,
 							myebuild=pkgsettings["EBUILD"],
 							mytree="porttree", mydbapi=portdb,
-							vartree=vartree, prev_mtimes=ldpath_mtimes)
+							vartree=vartree, prev_mtimes=ldpath_mtimes,
+							blockers=self._find_blockers(pkg))
 						if retval != os.EX_OK:
 							return retval
 				elif "noclean" not in pkgsettings.features:
@@ -5687,14 +5815,15 @@
 							prev_mtimes=ldpath_mtimes)
 						if retval != os.EX_OK:
 							return retval
-						self._dequeue_uninstall_tasks(mtimedb)
+
 						retval = portage.merge(pkgsettings["CATEGORY"],
 							pkgsettings["PF"], pkgsettings["D"],
 							os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
 							"build-info"), myroot, pkgsettings,
 							myebuild=pkgsettings["EBUILD"],
 							mytree="porttree", mydbapi=portdb,
-							vartree=vartree, prev_mtimes=ldpath_mtimes)
+							vartree=vartree, prev_mtimes=ldpath_mtimes,
+							blockers=self._find_blockers(pkg))
 						if retval != os.EX_OK:
 							return retval
 				finally:
@@ -5716,7 +5845,6 @@
 						portage.locks.unlockdir(catdir_lock)
 
 			elif x[0]=="binary":
-				self._dequeue_uninstall_tasks(mtimedb)
 				#merge the tbz2
 				mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key)
 				if "--getbinpkg" in self.myopts:
@@ -5772,7 +5900,8 @@
 				retval = portage.pkgmerge(mytbz2, x[1], pkgsettings,
 					mydbapi=bindb,
 					vartree=self.trees[myroot]["vartree"],
-					prev_mtimes=ldpath_mtimes)
+					prev_mtimes=ldpath_mtimes,
+					blockers=self._find_blockers(pkg))
 				if retval != os.EX_OK:
 					return retval
 				#need to check for errors
@@ -7888,6 +8017,7 @@ def action_build(settings, trees, mtimedb,
 	fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
 	ask = "--ask" in myopts
 	nodeps = "--nodeps" in myopts
+	oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
 	tree = "--tree" in myopts
 	if nodeps and tree:
 		tree = False
@@ -8059,7 +8189,7 @@
 			mergecount += 1
 
 		if mergecount==0:
-			if "--noreplace" in myopts and favorites:
+			if "--noreplace" in myopts and not oneshot and favorites:
 				print
 				for x in favorites:
 					print " %s %s" % (good("*"), x)
diff --git a/pym/portage/__init__.py b/pym/portage/__init__.py
index c51497eef..139fa57e0 100644
--- a/pym/portage/__init__.py
+++ b/pym/portage/__init__.py
@@ -5313,13 +5313,13 @@ def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
 	return newmtime
 
 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
-	mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
+	mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None):
 	if not os.access(myroot, os.W_OK):
 		writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
 			noiselevel=-1)
 		return errno.EACCES
 	mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
-		vartree=vartree)
+		vartree=vartree, blockers=blockers)
 	return mylink.merge(pkgloc, infloc, myroot, myebuild,
 		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
 
@@ -6184,7 +6184,8 @@ class FetchlistDict(UserDict.DictMixin):
 		"""Returns keys for all packages within pkgdir"""
 		return self.portdb.cp_list(self.cp, mytree=self.mytree)
 
-def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
+def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
+	vartree=None, prev_mtimes=None, blockers=None):
 	"""will merge a .tbz2 file, returning a list of runtime dependencies
 		that must be satisfied, or None if there was a merge error.  This
 		code assumes the package exists."""
@@ -6277,7 +6278,7 @@ def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes
 			#tbz2_lock = None
 
 			mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
-				treetype="bintree")
+				treetype="bintree", blockers=blockers)
 			retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
 				mydbapi=mydbapi, prev_mtimes=prev_mtimes)
 			did_merge_phase = True
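The pym/portage/__init__.py changes are pure plumbing: merge() and pkgmerge() grow an optional blockers argument, defaulting to None so existing callers are unaffected, and pass it through to dblink, which performs the collision check. A reduced sketch of the pattern, with illustrative stand-in names rather than the real portage API:

    # Minimal sketch of optional-parameter plumbing: "blockers" travels
    # from the top-level merge entry point down to the object that does
    # the collision check. All names here are hypothetical.
    class Link:
        def __init__(self, pkg, blockers=None):
            self.pkg = pkg
            self._blockers = blockers  # None means "no blocker info supplied"

        def merge(self):
            blockers = self._blockers or []
            # the collision check would see the blockers' files as well
            return "checked %s against %d blocker(s)" % (self.pkg, len(blockers))

    def merge_pkg(pkg, blockers=None):
        return Link(pkg, blockers=blockers).merge()

    print(merge_pkg("sys-apps/tool-2.0"))  # old callers: no change
    print(merge_pkg("sys-apps/tool-2.0", blockers=["sys-apps/tool-compat-1.0"]))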
diff --git a/pym/portage/dbapi/vartree.py b/pym/portage/dbapi/vartree.py
index 145016edc..f433789cd 100644
--- a/pym/portage/dbapi/vartree.py
+++ b/pym/portage/dbapi/vartree.py
@@ -932,7 +932,7 @@ class dblink(object):
 	}
 
 	def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
-		vartree=None):
+		vartree=None, blockers=None):
 		"""
 		Creates a DBlink object for a given CPV.
 		The given CPV may not be present in the database already.
@@ -961,6 +961,7 @@
 			from portage import db
 			vartree = db[myroot]["vartree"]
 		self.vartree = vartree
+		self._blockers = blockers
 		self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
 		self.dbcatdir = self.dbroot+"/"+cat
 
@@ -1037,6 +1038,11 @@
 		if os.path.exists(self.dbdir+"/CONTENTS"):
 			os.unlink(self.dbdir+"/CONTENTS")
 
+	def _clear_contents_cache(self):
+		self.contentscache = None
+		self._contents_inodes = None
+		self._contents_basenames = None
+
 	def getcontents(self):
 		"""
 		Get the installed files of a given package (aka what that package installed)
@@ -1923,6 +1929,7 @@
 		"""
 
 		srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
+		destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep
 
 		if not os.path.isdir(srcroot):
 			writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
@@ -2063,8 +2070,11 @@
 			self._preserve_libs(srcroot, destroot, myfilelist+mylinklist, counter, inforoot)
 
 		# check for package collisions
-		collisions = self._collision_protect(srcroot, destroot, others_in_slot,
-			myfilelist+mylinklist)
+		blockers = self._blockers
+		if blockers is None:
+			blockers = []
+		collisions = self._collision_protect(srcroot, destroot,
+			others_in_slot + blockers, myfilelist + mylinklist)
 
 		# Make sure the ebuild environment is initialized and that ${T}/elog
 		# exists for logging of collision-protect eerror messages.
@@ -2284,6 +2294,44 @@
 			self.dbdir = self.dbpkgdir
 			self.delete()
 		_movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+
+		# Check for file collisions with blocking packages
+		# and remove any colliding files from their CONTENTS
+		# since they now belong to this package.
+		self._clear_contents_cache()
+		contents = self.getcontents()
+		destroot_len = len(destroot) - 1
+		for blocker in blockers:
+			blocker_contents = blocker.getcontents()
+			collisions = []
+			for filename in blocker_contents:
+				relative_filename = filename[destroot_len:]
+				if self.isowner(relative_filename, destroot):
+					collisions.append(filename)
+			if not collisions:
+				continue
+			for filename in collisions:
+				del blocker_contents[filename]
+			f = atomic_ofstream(os.path.join(blocker.dbdir, "CONTENTS"))
+			try:
+				for filename in sorted(blocker_contents):
+					entry_data = blocker_contents[filename]
+					entry_type = entry_data[0]
+					relative_filename = filename[destroot_len:]
+					if entry_type == "obj":
+						entry_type, mtime, md5sum = entry_data
+						line = "%s %s %s %s\n" % \
+							(entry_type, relative_filename, md5sum, mtime)
+					elif entry_type == "sym":
+						entry_type, mtime, link = entry_data
+						line = "%s %s -> %s %s\n" % \
+							(entry_type, relative_filename, link, mtime)
+					else: # dir, dev, fif
+						line = "%s %s\n" % (entry_type, relative_filename)
+					f.write(line)
+			finally:
+				f.close()
+
 		# Due to mtime granularity, mtime checks do not always properly
 		# invalidate vardbapi caches.
 		self.vartree.dbapi.mtdircache.pop(self.cat, None)
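The CONTENTS rewrite loop above serializes each surviving entry back into the vardb CONTENTS format implied by its unpacking: obj entries carry an MD5 and mtime, sym entries a link target and mtime, and dir/dev/fif entries just the path. A small sketch of that serialization, assuming the same tuple layouts:

    import sys

    # Entry tuples mirror the unpacking in the loop above:
    # ("obj", mtime, md5sum), ("sym", mtime, link_target), ("dir",), ...
    def contents_line(entry_data, relative_filename):
        entry_type = entry_data[0]
        if entry_type == "obj":
            _, mtime, md5sum = entry_data
            return "obj %s %s %s\n" % (relative_filename, md5sum, mtime)
        if entry_type == "sym":
            _, mtime, link = entry_data
            return "sym %s -> %s %s\n" % (relative_filename, link, mtime)
        return "%s %s\n" % (entry_type, relative_filename)  # dir, dev, fif

    sys.stdout.write(contents_line(("obj", "1210000000", "d41d8cd9..."), "/usr/bin/tool"))
    sys.stdout.write(contents_line(("sym", "1210000000", "tool-2.0"), "/usr/bin/tool.link"))
    sys.stdout.write(contents_line(("dir",), "/usr/share/doc/tool"))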