-rw-r--r-- | pym/portage/_global_updates.py | 263
-rw-r--r-- | pym/portage/dbapi/__init__.py  |   5
-rw-r--r-- | pym/portage/dbapi/bintree.py   |   4
-rw-r--r-- | pym/portage/dbapi/vartree.py   |   4
-rw-r--r-- | pym/portage/update.py          |  15
5 files changed, 165 insertions, 126 deletions
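
A note on the smallest piece of the change, before the patch body: update_config_files() gains an optional match_callback argument that is consulted before each atom rewrite. The sketch below only illustrates that contract; rewrite_atoms and example_callback are hypothetical names used for illustration, not portage APIs, and the real function operates on CONFIG_PROTECT'ed files rather than an in-memory list.

# Illustration of the match_callback contract added by this patch
# (assumed simplification -- not portage code).
def example_callback(old_atom, new_atom):
    # Return True to allow rewriting old_atom to new_atom.
    return old_atom.startswith("dev-util/")

def rewrite_atoms(lines, old_atom, new_atom, match_callback=None):
    # Mirrors the patched loop in update_config_files(): the replacement
    # is applied only when the callback (if any) approves it.
    result = []
    for line in lines:
        if old_atom in line and (match_callback is None or match_callback(old_atom, new_atom)):
            line = line.replace(old_atom, new_atom)
        result.append(line)
    return result

print(rewrite_atoms(["dev-util/foo x86", "sys-apps/bar doc"],
    "dev-util/foo", "dev-util/baz", match_callback=example_callback))
# ['dev-util/baz x86', 'sys-apps/bar doc']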
diff --git a/pym/portage/_global_updates.py b/pym/portage/_global_updates.py
index 0e855c34e..a63e68af9 100644
--- a/pym/portage/_global_updates.py
+++ b/pym/portage/_global_updates.py
@@ -5,7 +5,7 @@ from __future__ import print_function
 
 import stat
 
-from portage import os
+from portage import best, os
 from portage.const import WORLD_FILE
 from portage.data import secpass
 from portage.exception import DirectoryNotFound
@@ -16,7 +16,8 @@ from portage.util import grabfile, writemsg, writemsg_stdout, write_atomic
 
 def _global_updates(trees, prev_mtimes):
 	"""
-	Perform new global updates if they exist in $PORTDIR/profiles/updates/.
+	Perform new global updates if they exist in 'profiles/updates/'
+	subdirectories of all active repositories (PORTDIR + PORTDIR_OVERLAY).
 	This simply returns if ROOT != "/" (when len(trees) != 1). If ROOT != "/"
 	then the user should instead use emaint --fix movebin and/or moveinst.
 
@@ -37,123 +38,151 @@ def _global_updates(trees, prev_mtimes):
 		return 0
 	root = "/"
 	mysettings = trees["/"]["vartree"].settings
-	updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
-
-	try:
-		if mysettings["PORTAGE_CALLER"] == "fixpackages":
-			update_data = grab_updates(updpath)
-		else:
-			update_data = grab_updates(updpath, prev_mtimes)
-	except DirectoryNotFound:
-		writemsg(_("--- 'profiles/updates' is empty or "
-			"not available. Empty portage tree?\n"), noiselevel=1)
-		return 0
-	myupd = None
-	if len(update_data) > 0:
-		do_upgrade_packagesmessage = 0
-		myupd = []
-		timestamps = {}
-		for mykey, mystat, mycontent in update_data:
-			writemsg_stdout("\n\n")
-			writemsg_stdout(colorize("GOOD",
-				_("Performing Global Updates: "))+bold(mykey)+"\n")
-			writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
-			writemsg_stdout(_(" %s='update pass' %s='binary update' "
-				"%s='/var/db update' %s='/var/db move'\n"
-				" %s='/var/db SLOT move' %s='binary move' "
-				"%s='binary SLOT move'\n %s='update /etc/portage/package.*'\n") % \
-				(bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
-			valid_updates, errors = parse_updates(mycontent)
-			myupd.extend(valid_updates)
-			writemsg_stdout(len(valid_updates) * "." + "\n")
-			if len(errors) == 0:
-				# Update our internal mtime since we
-				# processed all of our directives.
-				timestamps[mykey] = mystat[stat.ST_MTIME]
+	retupd = []
+
+	portdb = trees[root]["porttree"].dbapi
+	vardb = trees[root]["vartree"].dbapi
+	bindb = trees[root]["bintree"].dbapi
+	if not os.access(bindb.bintree.pkgdir, os.W_OK):
+		bindb = None
+	else:
+		# Call binarytree.populate(), since we want to make sure it's
+		# only populated with local packages here (getbinpkgs=0).
+		bindb.bintree.populate()
+
+	world_file = os.path.join(root, WORLD_FILE)
+	world_list = grabfile(world_file)
+	world_modified = False
+	world_warnings = set()
+
+	for repo_name in portdb.getRepositories():
+		repo = portdb.getRepositoryPath(repo_name)
+		updpath = os.path.join(repo, "profiles", "updates")
+
+		try:
+			if mysettings.get("PORTAGE_CALLER") == "fixpackages":
+				update_data = grab_updates(updpath)
 			else:
-				for msg in errors:
-					writemsg("%s\n" % msg, noiselevel=-1)
-
-		world_file = os.path.join(root, WORLD_FILE)
-		world_list = grabfile(world_file)
-		world_modified = False
-		for update_cmd in myupd:
-			for pos, atom in enumerate(world_list):
-				new_atom = update_dbentry(update_cmd, atom)
-				if atom != new_atom:
-					world_list[pos] = new_atom
-					world_modified = True
-		if world_modified:
-			world_list.sort()
-			write_atomic(world_file,
-				"".join("%s\n" % (x,) for x in world_list))
-
-		update_config_files("/",
-			mysettings.get("CONFIG_PROTECT","").split(),
-			mysettings.get("CONFIG_PROTECT_MASK","").split(),
-			myupd)
-
-		vardb = trees["/"]["vartree"].dbapi
-		bindb = trees["/"]["bintree"].dbapi
-		if not os.access(bindb.bintree.pkgdir, os.W_OK):
-			bindb = None
-		else:
-			# Call binarytree.populate(), since we want to make sure it's
-			# only populated with local packages here (getbinpkgs=0).
-			bindb.bintree.populate()
-		for update_cmd in myupd:
-			if update_cmd[0] == "move":
-				moves = vardb.move_ent(update_cmd)
-				if moves:
-					writemsg_stdout(moves * "@")
-				if bindb:
-					moves = bindb.move_ent(update_cmd)
+				update_data = grab_updates(updpath, prev_mtimes)
+		except DirectoryNotFound:
+			continue
+		myupd = None
+		if len(update_data) > 0:
+			do_upgrade_packagesmessage = 0
+			myupd = []
+			timestamps = {}
+			for mykey, mystat, mycontent in update_data:
+				writemsg_stdout("\n\n")
+				writemsg_stdout(colorize("GOOD",
+					_("Performing Global Updates: "))+bold(mykey)+"\n")
+				writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+				writemsg_stdout(_(" %s='update pass' %s='binary update' "
+					"%s='/var/db update' %s='/var/db move'\n"
+					" %s='/var/db SLOT move' %s='binary move' "
+					"%s='binary SLOT move'\n %s='update /etc/portage/package.*'\n") % \
+					(bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
+				valid_updates, errors = parse_updates(mycontent)
+				myupd.extend(valid_updates)
+				writemsg_stdout(len(valid_updates) * "." + "\n")
+				if len(errors) == 0:
+					# Update our internal mtime since we
+					# processed all of our directives.
+					timestamps[mykey] = mystat[stat.ST_MTIME]
+				else:
+					for msg in errors:
+						writemsg("%s\n" % msg, noiselevel=-1)
+			retupd.extend(myupd)
+
+			def _world_repo_match(atoma, atomb):
+				"""
+				Check whether to perform a world change from atoma to atomb.
+				If best vardb match for atoma comes from the same repository
+				as the update file, allow that. Additionally, if portdb still
+				can find a match for old atom name, warn about that.
+				"""
+				matches = vardb.match(atoma)
+				if matches and vardb.aux_get(best(matches), ['repository'])[0] == repo_name:
+					if portdb.match(atoma):
+						world_warnings.add((atoma, atomb))
+					return True
+				else:
+					return False
+
+			for update_cmd in myupd:
+				for pos, atom in enumerate(world_list):
+					new_atom = update_dbentry(update_cmd, atom)
+					if atom != new_atom:
+						if _world_repo_match(atom, new_atom):
+							world_list[pos] = new_atom
+							world_modified = True
+			update_config_files(root,
+				mysettings.get("CONFIG_PROTECT","").split(),
+				mysettings.get("CONFIG_PROTECT_MASK","").split(),
+				myupd, match_callback=_world_repo_match)
+
+			for update_cmd in myupd:
+				if update_cmd[0] == "move":
+					moves = vardb.move_ent(update_cmd, repo_name=repo_name)
 					if moves:
-						writemsg_stdout(moves * "%")
-			elif update_cmd[0] == "slotmove":
-				moves = vardb.move_slot_ent(update_cmd)
-				if moves:
-					writemsg_stdout(moves * "s")
-				if bindb:
-					moves = bindb.move_slot_ent(update_cmd)
+						writemsg_stdout(moves * "@")
+					if bindb:
+						moves = bindb.move_ent(update_cmd, repo_name=repo_name)
+						if moves:
+							writemsg_stdout(moves * "%")
+				elif update_cmd[0] == "slotmove":
+					moves = vardb.move_slot_ent(update_cmd, repo_name=repo_name)
 					if moves:
-						writemsg_stdout(moves * "S")
-
-		# The above global updates proceed quickly, so they
-		# are considered a single mtimedb transaction.
-		if len(timestamps) > 0:
-			# We do not update the mtime in the mtimedb
-			# until after _all_ of the above updates have
-			# been processed because the mtimedb will
-			# automatically commit when killed by ctrl C.
-			for mykey, mtime in timestamps.items():
-				prev_mtimes[mykey] = mtime
-
-		# We gotta do the brute force updates for these now.
-		if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
-		"fixpackages" in mysettings.features:
-			def onUpdate(maxval, curval):
-				if curval > 0:
-					writemsg_stdout("#")
-			vardb.update_ents(myupd, onUpdate=onUpdate)
-			if bindb:
+						writemsg_stdout(moves * "s")
+					if bindb:
+						moves = bindb.move_slot_ent(update_cmd, repo_name=repo_name)
+						if moves:
+							writemsg_stdout(moves * "S")
+
+			# The above global updates proceed quickly, so they
+			# are considered a single mtimedb transaction.
+			if len(timestamps) > 0:
+				# We do not update the mtime in the mtimedb
+				# until after _all_ of the above updates have
+				# been processed because the mtimedb will
+				# automatically commit when killed by ctrl C.
+				for mykey, mtime in timestamps.items():
+					prev_mtimes[mykey] = mtime
+
+			# We gotta do the brute force updates for these now.
+			if mysettings.get("PORTAGE_CALLER") == "fixpackages" or \
+				"fixpackages" in mysettings.features:
 				def onUpdate(maxval, curval):
 					if curval > 0:
-						writemsg_stdout("*")
-				bindb.update_ents(myupd, onUpdate=onUpdate)
-		else:
-			do_upgrade_packagesmessage = 1
-
-		# Update progress above is indicated by characters written to stdout so
-		# we print a couple new lines here to separate the progress output from
-		# what follows.
-		print()
-		print()
-
-		if do_upgrade_packagesmessage and bindb and \
-			bindb.cpv_all():
-			writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
-			writemsg_stdout(bold(_("Note: This can take a very long time.")))
-			writemsg_stdout("\n")
-	if myupd:
-		return myupd
+						writemsg_stdout("#")
+				vardb.update_ents(myupd, onUpdate=onUpdate)
+				if bindb:
+					def onUpdate(maxval, curval):
+						if curval > 0:
+							writemsg_stdout("*")
+					bindb.update_ents(myupd, onUpdate=onUpdate)
+			else:
+				do_upgrade_packagesmessage = 1
+
+			# Update progress above is indicated by characters written to stdout so
+			# we print a couple new lines here to separate the progress output from
+			# what follows.
+			print()
+			print()
+
+			if do_upgrade_packagesmessage and bindb and \
+				bindb.cpv_all():
+				writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
+				writemsg_stdout(bold(_("Note: This can take a very long time.")))
+				writemsg_stdout("\n")
+
+	if world_modified:
+		world_list.sort()
+		write_atomic(world_file,
+			"".join("%s\n" % (x,) for x in world_list))
+	if world_warnings:
+		# XXX: print warning that we've updated world entries
+		# and the old name still matches something (from an overlay)?
+		pass
+
+	if retupd:
+		return retupd
diff --git a/pym/portage/dbapi/__init__.py b/pym/portage/dbapi/__init__.py
index 2467cb54e..c2e7989b8 100644
--- a/pym/portage/dbapi/__init__.py
+++ b/pym/portage/dbapi/__init__.py
@@ -230,10 +230,11 @@ class dbapi(object):
 			if onProgress:
 				onProgress(maxval, i+1)
 
-	def move_slot_ent(self, mylist):
+	def move_slot_ent(self, mylist, repo_name = None):
 		"""This function takes a sequence:
 		Args:
 			mylist: a sequence of (package, originalslot, newslot)
+			repo_name: repository from which update is originated
 		Returns:
 			The number of slotmoves this function did
 		"""
@@ -248,6 +249,8 @@ class dbapi(object):
 			slot = self.aux_get(mycpv, ["SLOT"])[0]
 			if slot != origslot:
 				continue
+			if repo_name and self.aux_get(mycpv, ['repository'])[0] != repo_name:
+				continue
 			moves += 1
 			mydata = {"SLOT": newslot+"\n"}
 			self.aux_update(mycpv, mydata)
diff --git a/pym/portage/dbapi/bintree.py b/pym/portage/dbapi/bintree.py
index 080f9e5ff..84c51a977 100644
--- a/pym/portage/dbapi/bintree.py
+++ b/pym/portage/dbapi/bintree.py
@@ -288,7 +288,7 @@ class binarytree(object):
 	remotepkgs = property(_get_remotepkgs, _set_remotepkgs, _del_remotepkgs,
 		"Deprecated self.remotepkgs, only for backward compatibility")
 
-	def move_ent(self, mylist):
+	def move_ent(self, mylist, repo_name = None):
 		if not self.populated:
 			self.populate()
 		origcp = mylist[1]
@@ -307,6 +307,8 @@ class binarytree(object):
 			if mycpv_cp != origcp:
 				# Ignore PROVIDE virtual match.
 				continue
+			if repo_name and self.aux_get(mycpv, ['repository'])[0] != repo_name:
+				continue
 			mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
 			myoldpkg = catsplit(mycpv)[1]
 			mynewpkg = catsplit(mynewcpv)[1]
diff --git a/pym/portage/dbapi/vartree.py b/pym/portage/dbapi/vartree.py
index 24588fb2e..6dbb51379 100644
--- a/pym/portage/dbapi/vartree.py
+++ b/pym/portage/dbapi/vartree.py
@@ -923,7 +923,7 @@ class vardbapi(dbapi):
 				return True
 		return False
 
-	def move_ent(self, mylist):
+	def move_ent(self, mylist, repo_name = None):
 		origcp = mylist[1]
 		newcp = mylist[2]
 
@@ -940,6 +940,8 @@ class vardbapi(dbapi):
 			if mycpv_cp != origcp:
 				# Ignore PROVIDE virtual match.
 				continue
+			if repo_name and self.aux_get(mycpv, ['repository'])[0] != repo_name:
+				continue
 			mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
 			mynewcat = catsplit(newcp)[0]
 			origpath = self.getpath(mycpv)
diff --git a/pym/portage/update.py b/pym/portage/update.py
index 7d48e1d17..b24c9cb0c 100644
--- a/pym/portage/update.py
+++ b/pym/portage/update.py
@@ -182,12 +182,14 @@ def parse_updates(mycontent):
 		myupd.append(mysplit)
 	return myupd, errors
 
-def update_config_files(config_root, protect, protect_mask, update_iter):
+def update_config_files(config_root, protect, protect_mask, update_iter, match_callback = None):
 	"""Perform global updates on /etc/portage/package.*.
 	config_root - location of files to update
 	protect - list of paths from CONFIG_PROTECT
 	protect_mask - list of paths from CONFIG_PROTECT_MASK
-	update_iter - list of update commands as returned from parse_updates()"""
+	update_iter - list of update commands as returned from parse_updates()
+	match_callback - a callback which will be called with old and new atoms
+		and should return boolean value determining whether to perform the update"""
 
 	config_root = normalize_path(config_root)
 	update_files = {}
@@ -253,10 +255,11 @@ def update_config_files(config_root, protect, protect_mask, update_iter):
 					continue
 				new_atom = update_dbentry(update_cmd, atom)
 				if atom != new_atom:
-					contents[pos] = line.replace(atom, new_atom)
-					update_files[x] = 1
-					sys.stdout.write("p")
-					sys.stdout.flush()
+					if match_callback(atom, new_atom):
+						contents[pos] = line.replace(atom, new_atom)
+						update_files[x] = 1
+						sys.stdout.write("p")
+						sys.stdout.flush()
 
 	protect_obj = ConfigProtect(
 		config_root, protect, protect_mask)
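
The behavioral core of the patch is that every update now carries the repository it came from: move_ent()/move_slot_ent() skip packages whose 'repository' metadata differs from that repository, and a world-file atom is only rewritten when the best installed match for the old name belongs to it. The sketch below restates that rule with a plain dict standing in for the package databases; apply_move and the dict layout are assumptions for illustration only, not portage's dbapi.

# Simplified model of the repo-scoped filtering (illustration only).
def apply_move(installed, old_cp, new_cp, repo_name=None):
    # Rename old_cp to new_cp, but only for entries installed from the
    # repository that shipped the update -- the check this patch adds.
    moves = 0
    for cpv, meta in installed.items():
        if meta["cp"] != old_cp:
            continue
        if repo_name is not None and meta["repository"] != repo_name:
            continue
        meta["cp"] = new_cp
        moves += 1
    return moves

installed = {
    "dev-util/foo-1.0": {"cp": "dev-util/foo", "repository": "gentoo"},
    "dev-util/foo-2.0": {"cp": "dev-util/foo", "repository": "my-overlay"},
}

# A "move dev-util/foo dev-util/bar" entry from the 'gentoo' repository
# touches only the copy that was installed from 'gentoo'.
print(apply_move(installed, "dev-util/foo", "dev-util/bar", repo_name="gentoo"))  # 1
print(installed["dev-util/foo-2.0"]["cp"])  # still 'dev-util/foo'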