author Zac Medico <zmedico@gentoo.org> 2010-07-25 11:23:28 -0700
committer Zac Medico <zmedico@gentoo.org> 2010-07-25 11:23:28 -0700
commit 9caa9b5a7a84c7288f9cda98a4ecf85d10aa30c1 (patch)
tree 36c60968feec91f66df6f9f461ce78c07770c194
parent 9a715a4754a88ee25aacbd92e3ea272e41962c5e (diff)
Add a gc.collect() call inside iter_owners(), since people are reporting
high memory usage there. We should really only use this function to search for a small number of files. Larger numbers of files should use a different algorithm that will ensure that each CONTENTS file is only parsed once.
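As an illustration of the alternative the message describes (not part of this commit), here is a minimal sketch of a search that parses each installed package's CONTENTS file exactly once when looking for many paths. The method names used (vardbapi.cpv_all(), vardbapi._dblink(), dblink.getcontents()) are taken from the surrounding portage code as assumptions; the helper name and usage are hypothetical.

def iter_owners_once(vardb, paths):
	"""Yield (cpv, path) for each installed package that owns one of the
	given absolute paths, parsing every CONTENTS file exactly once."""
	query = frozenset(paths)
	for cpv in vardb.cpv_all():
		# getcontents() returns a dict keyed by absolute path (assumption
		# based on how dblink is used elsewhere in vartree.py).
		contents = vardb._dblink(cpv).getcontents()
		for path in query.intersection(contents):
			yield cpv, path

# Hypothetical usage:
#   import portage
#   vardb = portage.db[portage.root]["vartree"].dbapi
#   for cpv, path in iter_owners_once(vardb, ["/usr/bin/python"]):
#       print(cpv, path)

The cost here is one pass over all installed packages regardless of how many paths are queried, which is why the per-path cache in iter_owners() remains preferable for small queries.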
-rw-r--r--  pym/portage/dbapi/vartree.py  2
1 file changed, 2 insertions, 0 deletions
diff --git a/pym/portage/dbapi/vartree.py b/pym/portage/dbapi/vartree.py
index 5a98c7e06..668cbda63 100644
--- a/pym/portage/dbapi/vartree.py
+++ b/pym/portage/dbapi/vartree.py
@@ -52,6 +52,7 @@ from portage.cache.mappings import slot_dict_class
 
 import codecs
 from collections import deque
+import gc
 import re, shutil, stat, errno, copy, subprocess
 import logging
 import os as _os
@@ -1658,6 +1659,7 @@ class vardbapi(dbapi):
 					if len(dblink_fifo) >= 100:
 						# Ensure that we don't run out of memory.
 						del dblink_cache[dblink_fifo.popleft().mycpv]
+						gc.collect()
 					x = self._vardb._dblink(cpv)
 					dblink_cache[cpv] = x
 					dblink_fifo.append(x)
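For context, the hunk above sits inside a small FIFO-bounded cache of dblink objects, and the new gc.collect() call runs whenever an entry is evicted. Below is a minimal, self-contained sketch of that pattern; the class name, factory argument, and the size limit of 100 are illustrative assumptions, and only the evict-then-collect idea mirrors the patch.

import gc
from collections import deque

class BoundedFifoCache:
	"""FIFO-bounded cache that forces a garbage collection on eviction,
	so evicted objects trapped in reference cycles are freed promptly."""

	def __init__(self, factory, maxsize=100):
		self._factory = factory
		self._maxsize = maxsize
		self._cache = {}
		self._fifo = deque()

	def get(self, key):
		try:
			return self._cache[key]
		except KeyError:
			pass
		if len(self._fifo) >= self._maxsize:
			# Evict the oldest entry, then collect cycles so its
			# memory is actually released right away.
			del self._cache[self._fifo.popleft()]
			gc.collect()
		value = self._factory(key)
		self._cache[key] = value
		self._fifo.append(key)
		return value

Unlike this sketch, the patched code stores the dblink objects themselves in the deque and keys evictions by their mycpv attribute, as the context lines show. The explicit collection matters because dropping the last external reference to an object involved in a reference cycle does not free it until the cyclic garbage collector runs, which is presumably why memory kept growing in iter_owners() before this change.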