From 9caa9b5a7a84c7288f9cda98a4ecf85d10aa30c1 Mon Sep 17 00:00:00 2001 From: Zac Medico Date: Sun, 25 Jul 2010 11:23:28 -0700 Subject: Add a gc.collect() call inside iter_owners(), since people are reporting high memory usage there. We should really only use this function to search for a small number of files. Larger numbers of files should use a different algorithm that will ensure that each CONTENTS file is only parsed once. --- pym/portage/dbapi/vartree.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'pym') diff --git a/pym/portage/dbapi/vartree.py b/pym/portage/dbapi/vartree.py index 5a98c7e06..668cbda63 100644 --- a/pym/portage/dbapi/vartree.py +++ b/pym/portage/dbapi/vartree.py @@ -52,6 +52,7 @@ from portage.cache.mappings import slot_dict_class import codecs from collections import deque +import gc import re, shutil, stat, errno, copy, subprocess import logging import os as _os @@ -1658,6 +1659,7 @@ class vardbapi(dbapi): if len(dblink_fifo) >= 100: # Ensure that we don't run out of memory. del dblink_cache[dblink_fifo.popleft().mycpv] + gc.collect() x = self._vardb._dblink(cpv) dblink_cache[cpv] = x dblink_fifo.append(x) -- cgit v1.2.3-1-g7c22