author     David James <davidjames@google.com>    2011-03-24 18:14:45 -0700
committer  Zac Medico <zmedico@gentoo.org>        2011-03-26 12:21:27 -0700
commit     23b334d6553fb637ebe8e38f78183e0955180f0f (patch)
tree       69fd48d653c1d1e2026e4e057d5b321cbf244bd9 /pym
parent     f0262dfcc83da93c9f539a16a7e5a2ebca30ee28 (diff)
download   portage-23b334d6553fb637ebe8e38f78183e0955180f0f.tar.gz
           portage-23b334d6553fb637ebe8e38f78183e0955180f0f.tar.bz2
           portage-23b334d6553fb637ebe8e38f78183e0955180f0f.zip
Update Portage to sync BlockerDB at init.
Currently, Portage syncs the BlockerDB before every merge. This slows down
merges considerably because it requires reading the entire vardb from disk.

Since Portage doesn't merge conflicting packages anyway, we can optimize this
by just reading the vardb at initialization and caching that.

Change-Id: I6701926f022ef3aa2da10482fc8b09573ae24610
Review URL: http://codereview.chromium.org/6688037
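To make the intent concrete, here is a minimal sketch of the caching pattern the commit moves to. The InstalledPkgCache class and the read_vardb callable are hypothetical illustrations, not Portage APIs: the installed-package database is read once at initialization, and the cache is then updated incrementally instead of being re-read before every merge.

# Hypothetical sketch only; InstalledPkgCache and read_vardb are not Portage APIs.
class InstalledPkgCache:
    def __init__(self, read_vardb):
        # Read the installed-package database once, at initialization.
        self._pkgs = set(read_vardb())

    def discard(self, pkg):
        # Keep the cache current incrementally as packages are merged or
        # uninstalled, instead of re-reading the whole vardb per merge.
        self._pkgs.discard(pkg)

    def find_installed_blockers(self, new_pkg, blocks):
        # blocks(installed, new) decides whether an installed package
        # blocks the package about to be merged.
        return [p for p in self._pkgs if blocks(p, new_pkg)]

if __name__ == "__main__":
    cache = InstalledPkgCache(lambda: ["app-misc/foo-1", "app-misc/bar-2"])
    blocks = lambda installed, new: installed.split("/")[1].split("-")[0] == "bar"
    print(cache.find_installed_blockers("app-misc/baz-3", blocks))  # ['app-misc/bar-2']
    cache.discard("app-misc/bar-2")
    print(cache.find_installed_blockers("app-misc/baz-3", blocks))  # []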
Diffstat (limited to 'pym')
-rw-r--r--  pym/_emerge/BlockerDB.py   | 13
-rw-r--r--  pym/_emerge/FakeVartree.py | 18
-rw-r--r--  pym/_emerge/Scheduler.py   | 24
3 files changed, 25 insertions(+), 30 deletions(-)
diff --git a/pym/_emerge/BlockerDB.py b/pym/_emerge/BlockerDB.py
index fd4bf5ece..f5adc4d67 100644
--- a/pym/_emerge/BlockerDB.py
+++ b/pym/_emerge/BlockerDB.py
@@ -30,12 +30,7 @@ class BlockerDB(object):
"vartree" : fake_vartree,
}}
- def _get_fake_vartree(self, acquire_lock=0):
- fake_vartree = self._fake_vartree
- fake_vartree.sync(acquire_lock=acquire_lock)
- return fake_vartree
-
- def findInstalledBlockers(self, new_pkg, acquire_lock=0):
+ def findInstalledBlockers(self, new_pkg):
"""
Search for installed run-time blockers in the root where
new_pkg is planned to be installed. This ignores build-time
@@ -45,7 +40,7 @@ class BlockerDB(object):
dep_keys = ["RDEPEND", "PDEPEND"]
settings = self._vartree.settings
stale_cache = set(blocker_cache)
- fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
+ fake_vartree = self._fake_vartree
dep_check_trees = self._dep_check_trees
vardb = fake_vartree.dbapi
installed_pkgs = list(vardb)
@@ -118,3 +113,7 @@ class BlockerDB(object):
return blocking_pkgs
+ def discardBlocker(self, pkg):
+ """Discard a package from the list of potential blockers."""
+ self._fake_vartree.cpv_discard(pkg)
+
diff --git a/pym/_emerge/FakeVartree.py b/pym/_emerge/FakeVartree.py
index 977824c78..21bd355e9 100644
--- a/pym/_emerge/FakeVartree.py
+++ b/pym/_emerge/FakeVartree.py
@@ -100,6 +100,16 @@ class FakeVartree(vartree):
pkg, self.dbapi, self._global_updates)
return self._aux_get(pkg, wants)
+ def cpv_discard(self, pkg):
+ """
+ Discard a package from the fake vardb if it exists.
+ """
+ old_pkg = self.dbapi.get(pkg)
+ if old_pkg is not None:
+ self.dbapi.cpv_remove(old_pkg)
+ self._pkg_cache.pop(old_pkg, None)
+ self._aux_get_history.discard(old_pkg.cpv)
+
def sync(self, acquire_lock=1):
"""
Call this method to synchronize state with the real vardb
@@ -141,9 +151,7 @@ class FakeVartree(vartree):
# Remove any packages that have been uninstalled.
for pkg in list(pkg_vardb):
if pkg.cpv not in current_cpv_set:
- pkg_vardb.cpv_remove(pkg)
- pkg_cache.pop(pkg, None)
- aux_get_history.discard(pkg.cpv)
+ self.cpv_discard(pkg)
# Validate counters and timestamps.
slot_counters = {}
@@ -162,9 +170,7 @@ class FakeVartree(vartree):
if counter != pkg.counter or \
mtime != pkg.mtime:
- pkg_vardb.cpv_remove(pkg)
- pkg_cache.pop(pkg, None)
- aux_get_history.discard(pkg.cpv)
+ self.cpv_discard(pkg)
pkg = None
if pkg is None:
diff --git a/pym/_emerge/Scheduler.py b/pym/_emerge/Scheduler.py
index fa4825cf2..e4b6926c3 100644
--- a/pym/_emerge/Scheduler.py
+++ b/pym/_emerge/Scheduler.py
@@ -317,10 +317,6 @@ class Scheduler(PollScheduler):
Initialization structures used for dependency calculations
involving currently installed packages.
"""
- # TODO: Replace the BlockerDB with a depgraph of installed packages
- # that's updated incrementally with each upgrade/uninstall operation
- # This will be useful for making quick and safe decisions with respect
- # to aggressive parallelization discussed in bug #279623.
self._set_graph_config(graph_config)
self._blocker_db = {}
for root in self.trees:
@@ -329,6 +325,7 @@ class Scheduler(PollScheduler):
pkg_cache=self._pkg_cache)
else:
fake_vartree = graph_config.trees[root]['vartree']
+ fake_vartree.sync()
self._blocker_db[root] = BlockerDB(fake_vartree)
def _destroy_graph(self):
@@ -643,27 +640,20 @@ class Scheduler(PollScheduler):
def _find_blockers(self, new_pkg):
"""
- Returns a callable which should be called only when
- the vdb lock has been acquired.
+ Returns a callable.
"""
def get_blockers():
- return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
+ return self._find_blockers_impl(new_pkg)
return get_blockers
- def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
+ def _find_blockers_impl(self, new_pkg):
if self._opts_ignore_blockers.intersection(self.myopts):
return None
- # Call gc.collect() here to avoid heap overflow that
- # triggers 'Cannot allocate memory' errors (reported
- # with python-2.5).
- gc.collect()
-
blocker_db = self._blocker_db[new_pkg.root]
blocker_dblinks = []
- for blocking_pkg in blocker_db.findInstalledBlockers(
- new_pkg, acquire_lock=acquire_lock):
+ for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
if new_pkg.slot_atom == blocking_pkg.slot_atom:
continue
if new_pkg.cpv == blocking_pkg.cpv:
@@ -673,8 +663,6 @@ class Scheduler(PollScheduler):
self.pkgsettings[blocking_pkg.root], treetype="vartree",
vartree=self.trees[blocking_pkg.root]["vartree"]))
- gc.collect()
-
return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
@@ -1527,6 +1515,8 @@ class Scheduler(PollScheduler):
self._completed_tasks.add(pkg)
self._unsatisfied_system_deps.discard(pkg)
self._choose_pkg_return_early = False
+ blocker_db = self._blocker_db[pkg.root]
+ blocker_db.discardBlocker(pkg)
def _merge(self):
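Taken together, the changed call pattern looks roughly like the sketch below. These are simplified, hypothetical stand-ins for FakeVartree, BlockerDB, and the Scheduler hooks, not the real Portage classes: sync() runs once when the blocker databases are built, findInstalledBlockers() no longer syncs per merge, and discardBlocker() keeps the cached vardb consistent as each task completes.

# Hedged sketch only: simplified stand-ins, not the real Portage classes.
class FakeVartreeSketch:
    def __init__(self, read_vardb):
        self._read_vardb = read_vardb
        self.pkgs = set()

    def sync(self):
        # Full vardb read; after this commit it happens once, at init.
        self.pkgs = set(self._read_vardb())

    def cpv_discard(self, pkg):
        self.pkgs.discard(pkg)

class BlockerDBSketch:
    def __init__(self, fake_vartree):
        self._fake_vartree = fake_vartree

    def findInstalledBlockers(self, new_pkg):
        # No per-merge sync() here any more; rely on the cache built at init.
        return [p for p in self._fake_vartree.pkgs if p != new_pkg]

    def discardBlocker(self, pkg):
        # Mirrors the new _task_complete() hook: drop merged packages so the
        # cached view stays consistent without re-reading the vardb.
        self._fake_vartree.cpv_discard(pkg)

fake_vartree = FakeVartreeSketch(lambda: ["sys-apps/old-1.0", "app-misc/tool-2.1"])
fake_vartree.sync()                               # _init_graph(): sync once
blocker_db = BlockerDBSketch(fake_vartree)
print(blocker_db.findInstalledBlockers("sys-apps/new-1.1"))
blocker_db.discardBlocker("sys-apps/old-1.0")     # _task_complete(): keep cache current
print(blocker_db.findInstalledBlockers("sys-apps/new-1.1"))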