Diffstat (limited to 'pym')
-rw-r--r--  pym/cvstree.py               295
-rw-r--r--  pym/dcdialog.py              412
-rw-r--r--  pym/dispatch_conf.py         162
-rw-r--r--  pym/emergehelp.py            370
-rw-r--r--  pym/getbinpkg.py             541
-rw-r--r--  pym/output.py                167
-rw-r--r--  pym/portage.py              7452
-rw-r--r--  pym/portage.py.orig         7427
-rw-r--r--  pym/portage_checksum.py      134
-rw-r--r--  pym/portage_const.py          48
-rw-r--r--  pym/portage_contents.py      161
-rw-r--r--  pym/portage_data.py           85
-rw-r--r--  pym/portage_db_anydbm.py      64
-rw-r--r--  pym/portage_db_cpickle.py     79
-rw-r--r--  pym/portage_db_flat.py       113
-rw-r--r--  pym/portage_db_template.py   174
-rw-r--r--  pym/portage_db_test.py        21
-rw-r--r--  pym/portage_dep.py           155
-rw-r--r--  pym/portage_exception.py     163
-rw-r--r--  pym/portage_exec.py          215
-rw-r--r--  pym/portage_file.py           62
-rw-r--r--  pym/portage_gpg.py           149
-rw-r--r--  pym/portage_localization.py   21
-rw-r--r--  pym/portage_locks.py         360
-rw-r--r--  pym/portage_util.py          459
-rw-r--r--  pym/xpak.py                  384
26 files changed, 19673 insertions, 0 deletions
diff --git a/pym/cvstree.py b/pym/cvstree.py
new file mode 100644
index 000000000..4e883dd7b
--- /dev/null
+++ b/pym/cvstree.py
@@ -0,0 +1,295 @@
+# cvstree.py -- cvs tree utilities
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/cvstree.py,v 1.12.2.1 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: cvstree.py,v 1.12.2.1 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+import string,os,time,sys,re
+from stat import *
+
+# [D]/Name/Version/Date/Flags/Tags
+
+def pathdata(entries, path):
+ """(entries,path)
+	Returns the data (dict) for a specific file/dir at the specified path."""
+ mysplit=string.split(path,"/")
+ myentries=entries
+ mytarget=mysplit[-1]
+ mysplit=mysplit[:-1]
+ for mys in mysplit:
+ if myentries["dirs"].has_key(mys):
+ myentries=myentries["dirs"][mys]
+ else:
+ return None
+ if myentries["dirs"].has_key(mytarget):
+ return myentries["dirs"][mytarget]
+ elif myentries["files"].has_key(mytarget):
+ return myentries["files"][mytarget]
+ else:
+ return None
+
+def fileat(entries, path):
+ return pathdata(entries,path)
+
+def isadded(entries, path):
+ """(entries,path)
+ Returns true if the path exists and is added to the cvs tree."""
+ mytarget=pathdata(entries, path)
+ if mytarget:
+ if "cvs" in mytarget["status"]:
+ return 1
+
+ basedir=os.path.dirname(path)
+ filename=os.path.basename(path)
+
+ try:
+ myfile=open(basedir+"/CVS/Entries","r")
+ except IOError:
+ return 0
+ mylines=myfile.readlines()
+ myfile.close()
+
+	rep=re.compile("^/"+re.escape(filename)+"/")
+ for x in mylines:
+ if rep.search(x):
+ return 1
+
+ return 0
+
+def findnew(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that have been added but
+ have not yet been committed. Returns a list of paths, optionally prepended
+ with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "0" == entries["files"][myfile]["revision"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findnew(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findchanged(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that exist in the cvs tree
+ and differ from the committed version. Returns a list of paths, optionally
+ prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "current" not in entries["files"][myfile]["status"]:
+ if "exists" in entries["files"][myfile]["status"]:
+ if entries["files"][myfile]["revision"]!="0":
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findchanged(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findmissing(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that are listed in the cvs
+ tree but do not exist on the filesystem. Returns a list of paths,
+ optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "exists" not in entries["files"][myfile]["status"]:
+ if "removed" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findmissing(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findunadded(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that are in valid cvs
+ directories but are not part of the cvs tree. Returns a list of paths,
+ optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+
+ #ignore what cvs ignores.
+ for myfile in entries["files"].keys():
+ if "cvs" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findunadded(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findremoved(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all elements that are flagged for cvs
+	deletion. Returns a list of paths, optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "removed" in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findremoved(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findall(entries, recursive=0, basedir=""):
+ """(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all new, changed, missing, unadded, and
+	removed entities. Returns a 5-element list of lists as returned from each
+	find*()."""
+
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mynew = findnew(entries,recursive,basedir)
+ mychanged = findchanged(entries,recursive,basedir)
+ mymissing = findmissing(entries,recursive,basedir)
+ myunadded = findunadded(entries,recursive,basedir)
+ myremoved = findremoved(entries,recursive,basedir)
+ return [mynew, mychanged, mymissing, myunadded, myremoved]
+
+ignore_list = re.compile("(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
+def apply_cvsignore_filter(list):
+ x=0
+ while x < len(list):
+ if ignore_list.match(list[x].split("/")[-1]):
+ list.pop(x)
+ else:
+ x+=1
+ return list
+
+def getentries(mydir,recursive=0):
+	"""(mydir,recursive=0)
+	Scans the given directory and returns a data dict of all the entries in
+	the directory, separated into a dirs dict and a files dict."""
+ myfn=mydir+"/CVS/Entries"
+ # entries=[dirs, files]
+ entries={"dirs":{},"files":{}}
+ if not os.path.exists(mydir):
+ return entries
+ try:
+ myfile=open(myfn, "r")
+ mylines=myfile.readlines()
+ myfile.close()
+ except SystemExit, e:
+ raise
+ except:
+ mylines=[]
+ for line in mylines:
+ if line and line[-1]=="\n":
+ line=line[:-1]
+ if not line:
+ continue
+ if line=="D": # End of entries file
+ break
+ mysplit=string.split(line, "/")
+ if len(mysplit)!=6:
+ print "Confused:",mysplit
+ continue
+ if mysplit[0]=="D":
+ entries["dirs"][mysplit[1]]={"dirs":{},"files":{},"status":[]}
+ entries["dirs"][mysplit[1]]["status"]=["cvs"]
+ if os.path.isdir(mydir+"/"+mysplit[1]):
+ entries["dirs"][mysplit[1]]["status"]+=["exists"]
+ entries["dirs"][mysplit[1]]["flags"]=mysplit[2:]
+ if recursive:
+ rentries=getentries(mydir+"/"+mysplit[1],recursive)
+ #print rentries.keys()
+ #print entries["files"].keys()
+ #print entries["files"][mysplit[1]]
+ entries["dirs"][mysplit[1]]["dirs"]=rentries["dirs"]
+ entries["dirs"][mysplit[1]]["files"]=rentries["files"]
+ else:
+ # [D]/Name/revision/Date/Flags/Tags
+ entries["files"][mysplit[1]]={}
+ entries["files"][mysplit[1]]["revision"]=mysplit[2]
+ entries["files"][mysplit[1]]["date"]=mysplit[3]
+ entries["files"][mysplit[1]]["flags"]=mysplit[4]
+ entries["files"][mysplit[1]]["tags"]=mysplit[5]
+ entries["files"][mysplit[1]]["status"]=["cvs"]
+ if entries["files"][mysplit[1]]["revision"][0]=="-":
+ entries["files"][mysplit[1]]["status"]+=["removed"]
+
+ for file in apply_cvsignore_filter(os.listdir(mydir)):
+ if file=="CVS":
+ continue
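+		# Note: the "digest-framerd-2.4.3" checks below are leftover
+		# debugging output for one specific problem file.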
+ if file=="digest-framerd-2.4.3":
+ print mydir,file
+ if os.path.isdir(mydir+"/"+file):
+ if not entries["dirs"].has_key(file):
+ entries["dirs"][file]={"dirs":{},"files":{}}
+ if entries["dirs"][file].has_key("status"):
+ if "exists" not in entries["dirs"][file]["status"]:
+ entries["dirs"][file]["status"]+=["exists"]
+ else:
+ entries["dirs"][file]["status"]=["exists"]
+ elif os.path.isfile(mydir+"/"+file):
+ if file=="digest-framerd-2.4.3":
+ print "isfile"
+ if not entries["files"].has_key(file):
+ entries["files"][file]={"revision":"","date":"","flags":"","tags":""}
+ if entries["files"][file].has_key("status"):
+ if file=="digest-framerd-2.4.3":
+ print "has status"
+ if "exists" not in entries["files"][file]["status"]:
+ if file=="digest-framerd-2.4.3":
+ print "no exists in status"
+ entries["files"][file]["status"]+=["exists"]
+ else:
+ if file=="digest-framerd-2.4.3":
+ print "no status"
+ entries["files"][file]["status"]=["exists"]
+ try:
+ if file=="digest-framerd-2.4.3":
+ print "stat'ing"
+ mystat=os.stat(mydir+"/"+file)
+ mytime=time.asctime(time.gmtime(mystat[ST_MTIME]))
+ if not entries["files"][file].has_key("status"):
+ if file=="digest-framerd-2.4.3":
+ print "status not set"
+ entries["files"][file]["status"]=[]
+ if file=="digest-framerd-2.4.3":
+ print "date:",entries["files"][file]["date"]
+ print "sdate:",mytime
+ if mytime==entries["files"][file]["date"]:
+ entries["files"][file]["status"]+=["current"]
+ if file=="digest-framerd-2.4.3":
+ print "stat done"
+
+ del mystat
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "failed to stat",file
+ print e
+ return
+
+ else:
+ print
+ print "File of unknown type:",mydir+"/"+file
+ print
+ return entries
+
+#class cvstree:
+# def __init__(self,basedir):
+#		self.refdir=os.getcwd()
+# self.basedir=basedir
+# self.entries={}
+# self.entries["dirs"]={}
+# self.entries["files"]={}
+# self.entries["dirs"][self.basedir]=getentries(self.basedir)
+# self.getrealdirs(self.dirs, self.files)
+# def getrealdirs(self,dirs,files):
+# for mydir in dirs.keys():
+# list = os.listdir(
+
+
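As a quick orientation, here is a minimal usage sketch for the module above. The checkout path is hypothetical, and the sketch targets the same Python 2 environment as the code itself; it is not part of the commit.

# Hypothetical cvstree usage sketch (illustrative only).
import cvstree

entries = cvstree.getentries("/home/user/cvs-checkout", recursive=1)
new, changed, missing, unadded, removed = cvstree.findall(entries, recursive=1)
for path in changed:
    print "modified:", path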
diff --git a/pym/dcdialog.py b/pym/dcdialog.py
new file mode 100644
index 000000000..64d39983c
--- /dev/null
+++ b/pym/dcdialog.py
@@ -0,0 +1,412 @@
+#
+# Changes and extensions by Carlos Castillo...
+#
+cvs_id_string="$Id: dcdialog.py,v 1.1.2.1 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+#
+# Module: dialog.py
+# Copyright (c) 2000 Robb Shecter <robb@acm.org>
+# All rights reserved.
+# This source is covered by the GNU GPL.
+#
+# This module is a Python wrapper around the Linux "dialog" utility
+# by Savio Lam and Stuart Herbert. My goals were to make dialog as
+# easy to use from Python as possible. The demo code at the end of
+# the module is a good example of how to use it. To run the demo,
+# execute:
+#
+# python dialog.py
+#
+# This module has one class in it, "Dialog". An application typically
+# creates an instance of it, and possibly sets the background title option.
+# Then, methods can be called on it for interacting with the user.
+#
+# I wrote this because I want to use my 486-33 laptop as my main
+# development computer (!), and I wanted a way to nicely interact with the
+# user in console mode. There are apparently other modules out there
+# with similar functionality, but they require the Python curses library.
+# Writing this module from scratch was easier than figuring out how to
+# recompile Python with curses enabled. :)
+#
+# One interesting feature is that the menu and selection windows allow
+# *any* objects to be displayed and selected, not just strings.
+#
+# TO DO:
+# Add code so that the input buffer is flushed before a dialog box is
+# shown. This would make the UI more predictable for users. This
+# feature could be turned on and off through an instance method.
+# Drop using temporary files when interacting with 'dialog'
+# (it's possible -- I've already tried :-).
+# Try detecting the terminal window size in order to make reasonable
+# height and width defaults. Hmmm - should also then check for
+# terminal resizing...
+# Put into a package name to make more reusable - reduce the possibility
+# of name collisions.
+#
+# NOTES:
+# there is a bug in (at least) Linux-Mandrake 7.0 Russian Edition
+# running on AMD K6-2 3D that causes core dump when 'dialog'
+# is running with --gauge option;
+# in this case you'll have to recompile 'dialog' program.
+#
+# Modifications:
+# Jul 2000, Sultanbek Tezadov (http://sultan.da.ru)
+# Added:
+# - 'gauge' widget *)
+# - 'title' option to some widgets
+# - 'checked' option to checklist dialog; clicking "Cancel" is now
+# recognizable
+# - 'selected' option to radiolist dialog; clicking "Cancel" is now
+# recognizable
+# - some other cosmetic changes and improvements
+#
+
+import os
+from tempfile import mktemp
+from string import split
+from time import sleep
+
+#
+# Path of the dialog executable
+#
+DIALOG="/usr/bin/dialog"
+
+
+class Dialog:
+ def __init__(self):
+ self.__bgTitle = '' # Default is no background title
+
+
+ def setBackgroundTitle(self, text):
+ self.__bgTitle = '--backtitle "%s"' % text
+
+
+ def __perform(self, cmd):
+ """Do the actual work of invoking dialog and getting the output."""
+ fName = mktemp()
+ rv = os.system('%s %s %s 2> %s' % (DIALOG, self.__bgTitle, cmd, fName))
+ f = open(fName)
+ output = f.readlines()
+ f.close()
+ os.unlink(fName)
+ return (rv, output)
+
+
+ def __perform_no_options(self, cmd):
+ """Call dialog w/out passing any more options. Needed by --clear."""
+ return os.system(DIALOG + ' ' + cmd)
+
+
+ def __handleTitle(self, title):
+ if len(title) == 0:
+ return ''
+ else:
+ return '--title "%s" ' % title
+
+
+ def yesno(self, text, height=10, width=30, title=''):
+ """
+ Put a Yes/No question to the user.
+ Uses the dialog --yesno option.
+ Returns a 1 or a 0.
+ """
+ (code, output) = self.__perform(self.__handleTitle(title) +\
+ '--yesno "%s" %d %d' % (text, height, width))
+ return code == 0
+
+
+ def msgbox(self, text, height=10, width=30, title=''):
+ """
+ Pop up a message to the user which has to be clicked
+ away with "ok".
+ """
+ self.__perform(self.__handleTitle(title) +\
+ '--msgbox "%s" %d %d' % (text, height, width))
+
+
+ def infobox(self, text, height=10, width=30):
+ """Make a message to the user, and return immediately."""
+ self.__perform('--infobox "%s" %d %d' % (text, height, width))
+
+
+ def inputbox(self, text, height=10, width=30, init='', title=''):
+ """
+ Request a line of input from the user.
+ Returns the user's input or None if cancel was chosen.
+ """
+ (c, o) = self.__perform(self.__handleTitle(title) +\
+ '--inputbox "%s" %d %d "%s"' % (text, height, width, init))
+ try:
+ return o[0]
+ except IndexError:
+ if c == 0: # empty string entered
+ return ''
+ else: # canceled
+ return None
+
+
+ def textbox(self, filename, height=20, width=60, title=None):
+ """Display a file in a scrolling text box."""
+ if title is None:
+ title = filename
+ self.__perform(self.__handleTitle(title) +\
+ ' --textbox "%s" %d %d' % (filename, height, width))
+
+
+ def menu(self, text, height=15, width=54, list=[]):
+ """
+ Display a menu of options to the user. This method simplifies the
+ --menu option of dialog, which allows for complex arguments. This
+ method receives a simple list of objects, and each one is assigned
+ a choice number.
+ The selected object is returned, or None if the dialog was canceled.
+ """
+ menuheight = height - 8
+ pairs = map(lambda i, item: (i + 1, item), range(len(list)), list)
+ choices = reduce(lambda res, pair: res + '%d "%s" ' % pair, pairs, '')
+ (code, output) = self.__perform('--menu "%s" %d %d %d %s' %\
+ (text, height, width, menuheight, choices))
+ try:
+ return list[int(output[0]) - 1]
+ except IndexError:
+ return None
+
+ def menu_ext(self, text, height=15, width=54, list=[], list2=[]):
+ """
+		Extends the menu method above to take (string, string) pairs, for the GLIS UI
+ """
+ menuheight = height - 8
+ pairs = []
+ for i in range(len(list)):
+ pairs.append((list2[i],list[i]))
+ #pairs = map(lambda i, item: (i + 1, item), range(len(list)), list)
+ choices = reduce(lambda res, pair: res + '%s "%s" ' % pair, pairs, '')
+ (code, output) = self.__perform('--menu "%s" %d %d %d %s' %\
+ (text, height, width, menuheight, choices))
+ try:
+ return output[0]
+ except IndexError:
+ return None
+
+
+ def checklist(self, text, height=15, width=54, list=[], checked=None):
+ """
+ Returns a list of the selected objects.
+ Returns an empty list if nothing was selected.
+ Returns None if the window was canceled.
+ checked -- a list of boolean (0/1) values; len(checked) must equal
+ len(list).
+ """
+ if checked is None:
+ checked = [0]*len(list)
+ menuheight = height - 8
+ triples = map(
+ lambda i, item, onoff, fs=('off', 'on'): (i + 1, item, fs[onoff]),
+ range(len(list)), list, checked)
+ choices = reduce(lambda res, triple: res + '%d "%s" %s ' % triple,
+ triples, '')
+ (c, o) = self.__perform('--checklist "%s" %d %d %d %s' %\
+ (text, height, width, menuheight, choices))
+ try:
+ output = o[0]
+ indexList = map(lambda x: int(x[1:-1]), split(output))
+ objectList = filter(lambda item, list=list, indexList=indexList:
+ list.index(item) + 1 in indexList,
+ list)
+ return objectList
+ except IndexError:
+ if c == 0: # Nothing was selected
+ return []
+ return None # Was canceled
+
+ def checklist_ext(self, text, height=15, width=54, list=[], list2=[], checked=None):
+ """
+ Returns a list of the selected objects.
+ Returns an empty list if nothing was selected.
+ Returns None if the window was canceled.
+ checked -- a list of boolean (0/1) values; len(checked) must equal
+ len(list).
+ """
+ if checked is None:
+ checked = [0]*len(list)
+ menuheight = height - 8
+ triples = []
+ #equally 3 lines, much more readable
+ fs = ('off','on')
+ for i in range(len(list)):
+ triples.append((list2[i],list[i],fs[checked[i]]))
+
+## triples = map(
+## lambda i, item, onoff, fs=('off', 'on'): (i + 1, item, fs[onoff]),
+## range(len(list)), list, checked)
+ choices = reduce(lambda res, triple: res + '%s "%s" %s ' % triple,
+ triples, '')
+ (c, o) = self.__perform('--checklist "%s" %d %d %d %s' %\
+ (text, height, width, menuheight, choices))
+ try:
+ output = o[0]
+ return split(output)
+## indexList = map(lambda x: int(x[1:-1]), split(output))
+## objectList = filter(lambda item, list=list, indexList=indexList:
+## list.index(item) + 1 in indexList,
+## list)
+## return objectList
+ except IndexError:
+ if c == 0: # Nothing was selected
+ return []
+ return None # Was canceled
+
+
+ def radiolist(self, text, height=15, width=54, list=[], selected=0):
+ """
+ Return the selected object.
+ Returns empty string if no choice was selected.
+ Returns None if window was canceled.
+ selected -- the selected item (must be between 1 and len(list)
+ or 0, meaning no selection).
+ """
+ menuheight = height - 8
+ triples = map(lambda i, item: (i + 1, item, 'off'),
+ range(len(list)), list)
+ if selected:
+ i, item, tmp = triples[selected - 1]
+ triples[selected - 1] = (i, item, 'on')
+ choices = reduce(lambda res, triple: res + '%d "%s" %s ' % triple,
+ triples, '')
+ (c, o) = self.__perform('--radiolist "%s" %d %d %d %s' %\
+ (text, height, width, menuheight, choices))
+ try:
+ return list[int(o[0]) - 1]
+ except IndexError:
+ if c == 0:
+ return ''
+ return None
+
+
+
+ def clear(self):
+ """
+ Clear the screen. Equivalent to the dialog --clear option.
+ """
+ self.__perform_no_options('--clear')
+
+
+ def scrollbox(self, text, height=20, width=60, title=''):
+ """
+ This is a bonus method. The dialog package only has a function to
+ display a file in a scrolling text field. This method allows any
+ string to be displayed by first saving it in a temp file, and calling
+ --textbox.
+ """
+ fName = mktemp()
+ f = open(fName, 'w')
+ f.write(text)
+ f.close()
+ self.__perform(self.__handleTitle(title) +\
+ '--textbox "%s" %d %d' % (fName, height, width))
+ os.unlink(fName)
+
+
+ def gauge_start(self, perc=0, text='', height=8, width=54, title=''):
+ """
+ Display gauge output window.
+        Typical gauge usage (assuming that there is an instance of the
+        'Dialog' class named 'd'):
+            d.gauge_start()
+            # do something
+            d.gauge_iterate(10) # passed through 10%
+            # ...
+            d.gauge_iterate(100, 'any text here') # work is done
+            d.gauge_stop() # clean-up actions
+ """
+ cmd = self.__handleTitle(title) +\
+ '--gauge "%s" %d %d %d' % (text, height, width, perc)
+ cmd = '%s %s %s 2> /dev/null' % (DIALOG, self.__bgTitle, cmd)
+ self.pipe = os.popen(cmd, 'w')
+ #/gauge_start()
+
+
+ def gauge_iterate(self, perc, text=''):
+ """
+ Update percentage point value.
+
+ See gauge_start() function above for the usage.
+ """
+ if text:
+ text = 'XXX\n%d\n%s\nXXX\n' % (perc, text)
+ else:
+ text = '%d\n' % perc
+ self.pipe.write(text)
+ self.pipe.flush()
+ #/gauge_iterate()
+
+
+ def gauge_stop(self):
+ """
+ Finish previously started gauge.
+
+ See gauge_start() function above for the usage.
+ """
+ self.pipe.close()
+ #/gauge_stop()
+
+
+
+#
+# DEMO APPLICATION
+#
+if __name__ == '__main__':
+ """
+ This demo tests all the features of the class.
+ """
+ d = Dialog()
+ d.setBackgroundTitle('dialog.py demo')
+
+ d.infobox(
+ "One moment... Just wasting some time here to test the infobox...")
+ sleep(3)
+
+ if d.yesno("Do you like this demo?"):
+ d.msgbox("Excellent! Here's the source code:")
+ else:
+ d.msgbox("Send your complaints to /dev/null")
+
+ d.textbox("dialog.py")
+
+ name = d.inputbox("What's your name?", init="Snow White")
+ fday = d.menu("What's your favorite day of the week?",
+ list=["Monday", "Tuesday", "Wednesday", "Thursday",
+ "Friday (The best day of all)", "Saturday", "Sunday"])
+ food = d.checklist("What sandwich toppings do you like?",
+ list=["Catsup", "Mustard", "Pesto", "Mayonaise", "Horse radish",
+ "Sun-dried tomatoes"], checked=[0,0,0,1,1,1])
+ sand = d.radiolist("What's your favorite kind of sandwich?",
+ list=["Hamburger", "Hotdog", "Burrito", "Doener", "Falafel",
+ "Bagel", "Big Mac", "Whopper", "Quarter Pounder",
+ "Peanut Butter and Jelly", "Grilled cheese"], selected=4)
+
+ # Prepare the message for the final window
+ bigMessage = "Here are some vital statistics about you:\n\nName: " + name +\
+ "\nFavorite day of the week: " + fday +\
+ "\nFavorite sandwich toppings:\n"
+ for topping in food:
+ bigMessage = bigMessage + " " + topping + "\n"
+ bigMessage = bigMessage + "Favorite sandwich: " + str(sand)
+
+ d.scrollbox(bigMessage)
+
+ #<># Gauge Demo
+ d.gauge_start(0, 'percentage: 0', title='Gauge Demo')
+ for i in range(1, 101):
+ if i < 50:
+ msg = 'percentage: %d' % i
+ elif i == 50:
+ msg = 'Over 50%'
+ else:
+ msg = ''
+ d.gauge_iterate(i, msg)
+ sleep(0.1)
+ d.gauge_stop()
+ #<>#
+
+ d.clear()
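The *_ext variants added in this file take parallel tag/label lists instead of numbering the items. A minimal sketch of menu_ext (hypothetical tags and labels; assumes dialog is installed at /usr/bin/dialog; not part of the commit):

# Hypothetical menu_ext usage sketch (illustrative only).
from dcdialog import Dialog

d = Dialog()
tag = d.menu_ext("Select a kernel:",
                 list=["Stable kernel", "Testing kernel"],     # displayed labels
                 list2=["gentoo-sources", "vanilla-sources"])  # returned tags
print "selected:", tag  # a tag from list2, or None if canceled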
diff --git a/pym/dispatch_conf.py b/pym/dispatch_conf.py
new file mode 100644
index 000000000..27d737123
--- /dev/null
+++ b/pym/dispatch_conf.py
@@ -0,0 +1,162 @@
+# dispatch_conf.py -- functionality common to archive-conf and dispatch-conf
+# Copyright 2003-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/dispatch_conf.py,v 1.3.2.3 2005/04/29 03:37:30 jstubbs Exp $
+cvs_id_string="$Id: dispatch_conf.py,v 1.3.2.3 2005/04/29 03:37:30 jstubbs Exp $"[5:-2]
+
+# Library by Wayne Davison <gentoo@blorf.net>, derived from code
+# written by Jeremy Wohl (http://igmus.org)
+
+from stat import *
+import os, sys, commands, shutil
+
+sys.path = ["/usr/lib/portage/pym"]+sys.path
+import portage
+
+RCS_BRANCH = '1.1.1'
+RCS_LOCK = 'rcs -ko -M -l'
+RCS_PUT = 'ci -t-"Archived config file." -m"dispatch-conf update."'
+RCS_GET = 'co'
+RCS_MERGE = 'rcsmerge -p -r' + RCS_BRANCH + ' %s >%s'
+
+DIFF3_MERGE = 'diff3 -mE %s %s %s >%s'
+
+def read_config(mandatory_opts):
+ try:
+ opts = portage.getconfig('/etc/dispatch-conf.conf')
+ except:
+ opts = None
+
+ if not opts:
+ print >> sys.stderr, 'dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'
+ sys.exit(1)
+
+ for key in mandatory_opts:
+ if not opts.has_key(key):
+ if key == "merge":
+ opts["merge"] = "sdiff --suppress-common-lines --output=%s %s %s"
+ else:
+                print >> sys.stderr, 'dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal' % (key,)
+                sys.exit(1)
+
+ if not os.path.exists(opts['archive-dir']):
+ os.mkdir(opts['archive-dir'])
+ elif not os.path.isdir(opts['archive-dir']):
+ print >> sys.stderr, 'dispatch-conf: Config archive dir [%s] must exist; fatal' % (opts['archive-dir'],)
+ sys.exit(1)
+
+ return opts
+
+
+def rcs_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config in rcs (on trunk). Then, if mrgconf is
+ specified and an old branch version exists, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, leave it in the archive dir with a .dist.new
+ suffix along with the last 1.1.1 branch version with a .dist suffix."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except:
+ pass
+
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (curconf, archive, str(why))
+ if os.path.exists(archive + ',v'):
+ os.system(RCS_LOCK + ' ' + archive)
+ os.system(RCS_PUT + ' ' + archive)
+
+ ret = 0
+ if newconf != '':
+ os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
+ has_branch = os.path.exists(archive)
+ if has_branch:
+ os.rename(archive, archive + '.dist')
+
+ try:
+ shutil.copy2(newconf, archive)
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (newconf, archive, str(why))
+
+ if has_branch:
+ if mrgconf != '':
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(RCS_MERGE % (archive, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat[ST_MODE])
+ os.chown(mrgconf, mystat[ST_UID], mystat[ST_GID])
+ os.rename(archive, archive + '.dist.new')
+ return ret
+
+
+def file_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config to the archive-dir, bumping old versions
+ out of the way into .# versions (log-rotate style). Then, if mrgconf
+ was specified and there is a .dist version, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, archive it as a .dist.new version (which
+ gets moved to the .dist version at the end of the processing)."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except:
+ pass
+
+ # Archive the current config file if it isn't already saved
+ if os.path.exists(archive) \
+ and len(commands.getoutput('diff -aq %s %s' % (curconf,archive))) != 0:
+ suf = 1
+ while suf < 9 and os.path.exists(archive + '.' + str(suf)):
+ suf += 1
+
+ while suf > 1:
+ os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
+ suf -= 1
+
+ os.rename(archive, archive + '.1')
+
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (curconf, archive, str(why))
+
+ if newconf != '':
+ # Save off new config file in the archive dir with .dist.new suffix
+ try:
+ shutil.copy2(newconf, archive + '.dist.new')
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (newconf, archive + '.dist.new', str(why))
+
+ ret = 0
+ if mrgconf != '' and os.path.exists(archive + '.dist'):
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat[ST_MODE])
+ os.chown(mrgconf, mystat[ST_UID], mystat[ST_GID])
+
+ return ret
+
+
+def rcs_archive_post_process(archive):
+ """Check in the archive file with the .dist.new suffix on the branch
+ and remove the one with the .dist suffix."""
+ os.rename(archive + '.dist.new', archive)
+ if os.path.exists(archive + '.dist'):
+ # Commit the last-distributed version onto the branch.
+ os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
+ os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
+ os.unlink(archive + '.dist')
+ else:
+ # Forcefully commit the last-distributed version onto the branch.
+ os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
+
+
+def file_archive_post_process(archive):
+ """Rename the archive file with the .dist.new suffix to a .dist suffix"""
+ os.rename(archive + '.dist.new', archive + '.dist')
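A sketch of how the file-based archive functions above fit together. All paths are illustrative; a real caller would derive them from read_config() and the CONFIG_PROTECT machinery. This is not part of the commit.

# Hypothetical file_archive flow (illustrative paths only).
import dispatch_conf

archive = '/etc/config-archive/etc/foo.conf'  # inside archive-dir
curconf = '/etc/foo.conf'                     # user's current config
newconf = '/etc/._cfg0000_foo.conf'           # newly installed version
mrgconf = '/tmp/foo.conf.merged'              # where the 3-way merge lands

ret = dispatch_conf.file_archive(archive, curconf, newconf, mrgconf)
if ret == 0:
    # no merge conflicts; promote the .dist.new archive to .dist
    dispatch_conf.file_archive_post_process(archive)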
diff --git a/pym/emergehelp.py b/pym/emergehelp.py
new file mode 100644
index 000000000..98a399e41
--- /dev/null
+++ b/pym/emergehelp.py
@@ -0,0 +1,370 @@
+# Copyright 1999-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/emergehelp.py,v 1.8.2.2 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: emergehelp.py,v 1.8.2.2 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+import os,sys
+from output import bold, turquoise, green, nocolor
+
+def shorthelp():
+ print
+ print
+ print bold("Usage:")
+ print " "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuildfile")+" | "+turquoise("tbz2file")+" | "+turquoise("dependency")+" ] [ ... ]"
+ print " "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("system")+" | "+turquoise("world")+" >"
+ print " "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >"
+ print " "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]"
+ print " "+turquoise("emerge")+" "+turquoise("--help")+" [ "+green("system")+" | "+green("config")+" | "+green("sync")+" ] "
+ print bold("Options:")+" "+green("-")+"["+green("abcCdDefhikKlnoOpPsSuUvV")+"] ["+green("--oneshot")+"] ["+green("--newuse")+"] ["+green("--noconfmem")+"]"
+ print " ["+green("--columns")+"] ["+green("--nospinner")+"]"
+ print bold("Actions:")+" [ "+green("--clean")+" | "+green("--depclean")+" | "+green("--inject")+" | "+green("--prune")+" | "+green("--regen")+" | "+green("--search")+" | "+green("--unmerge")+" ]"
+ print
+
+def help(myaction,myopts,havecolor=1):
+ if not havecolor:
+ nocolor()
+ if not myaction and ("--help" not in myopts):
+ shorthelp()
+ print
+ print " For more help try 'emerge --help' or consult the man page."
+ print
+ elif not myaction:
+ shorthelp()
+ print
+ print turquoise("Help (this screen):")
+ print " "+green("--help")+" ("+green("-h")+" short option)"
+ print " Displays this help; an additional argument (see above) will tell"
+ print " emerge to display detailed help."
+ print
+ print turquoise("Actions:")
+ print " "+green("--clean")+" ("+green("-c")+" short option)"
+	print "       Cleans the system by removing outdated packages; this will not"
+	print "       remove functionality or prevent your system from working."
+	print "       The arguments can be in several different formats:"
+ print " * world "
+ print " * system or"
+ print " * 'dependency specification' (in single quotes is best.)"
+ print " Here are a few examples of the dependency specification format:"
+ print " "+bold("binutils")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print " "+bold("sys-devel/binutils")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print " "+bold(">sys-devel/binutils-2.11.90.0.7")+" matches"
+ print " binutils-2.11.92.0.12.3-r1"
+ print " "+bold(">=sys-devel/binutils-2.11.90.0.7")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print " "+bold("<=sys-devel/binutils-2.11.92.0.12.3-r1")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print
+ print " "+green("--depclean")
+ print " Cleans the system by removing packages that are not associated"
+ print " with explicitly merged packages. Depclean works by creating the"
+ print " full dependency tree from the system list and the world file,"
+ print " then comparing it to installed packages. Packages installed, but"
+ print " not associated with an explicit merge are listed as candidates"
+ print " for unmerging."+turquoise(" WARNING: This can seriously affect your system by")
+ print " "+turquoise("removing packages that may have been linked against, but due to")
+ print " "+turquoise("changes in USE flags may no longer be part of the dep tree. Use")
+ print " "+turquoise("caution when employing this feature.")
+ print
+ print " "+green("--info")
+ print " Displays important portage variables that will be exported to"
+ print " ebuild.sh when performing merges. This information is useful"
+ print " for bug reports and verification of settings. All settings in"
+ print " make.{conf,globals,defaults} and the environment show up if"
+ print " run with the '--verbose' flag."
+ print
+ print " "+green("--metadata")
+ print " Causes portage to process all the metacache files as is normally done"
+ print " on the tail end of an rsync update using "+bold("emerge --sync")+". The"
+ print " processing creates the cache database that portage uses for"
+ print " pre-parsed lookups of package data."
+ print
+ print " "+green("--prune")+" ("+green("-P")+" short option)"
+ print " "+turquoise("WARNING: This action can remove important packages!")
+ print " Removes all but the most recently installed version of a package"
+ print " from your system. This action doesn't verify the possible binary"
+ print " compatibility between versions and can thus remove essential"
+ print " dependencies from your system."
+ print " The argument format is the same as for the "+bold("--clean")+" action."
+ print
+ print " "+green("--regen")
+ print " Causes portage to check and update the dependency cache of all"
+ print " ebuilds in the portage tree. This is not recommended for rsync"
+ print " users as rsync updates the cache using server-side caches."
+ print " Rsync users should simply 'emerge --sync' to regenerate."
+ print
+ print " "+green("--search")+" ("+green("-s")+" short option)"
+ print " searches for matches of the supplied string in the current local"
+ print " portage tree. The search string is a regular expression. Prepending"
+ print " the expression with a '@' will cause the category to be included in"
+ print " the search."
+ print " A few examples:"
+ print " "+bold("emerge search '^kde'")
+ print " list all packages starting with kde"
+ print " "+bold("emerge search 'gcc$'")
+ print " list all packages ending with gcc"
+ print " "+bold("emerge search @^dev-java.*jdk")
+ print " list all available Java JDKs"
+ print
+ print " "+green("--unmerge")+" ("+green("-C")+" short option)"
+ print " "+turquoise("WARNING: This action can remove important packages!")
+ print " Removes all matching packages "+bold("completely")+" from"
+ print " your system. Specify arguments using the dependency specification"
+ print " format described in the "+bold("--clean")+" action above."
+ print
+ print turquoise("Options:")
+ print " "+green("--ask")+" ("+green("-a")+" short option)"
+ print " before performing the merge, display what ebuilds and tbz2s will"
+ print " be installed, in the same format as when using --pretend; then"
+ print " ask whether to continue with the merge or abort. Using --ask is"
+ print " more efficient than using --pretend and then executing the same"
+ print " command without --pretend, as dependencies will only need to be"
+ print " calculated once."
+ print
+ print " "+green("--buildpkg")+" ("+green("-b")+" short option)"
+ print " Tell emerge to build binary packages for all ebuilds processed"
+	print "       (in addition to actually merging the packages). Useful for"
+	print "       maintainers or if you administer multiple Gentoo Linux"
+ print " systems (build once, emerge tbz2s everywhere) as well as disaster"
+ print " recovery."
+ print
+ print " "+green("--buildpkgonly")+" ("+green("-B")+" short option)"
+ print " Creates a binary package, but does not merge it to the"
+	print "       system. This has the restriction that the desired package must"
+	print "       not have unsatisfied dependencies, since they cannot be used"
+	print "       unless they already exist on the system."
+ print
+ print " "+green("--changelog")+" ("+green("-l")+" short option)"
+ print " When pretending, also display the ChangeLog entries for packages"
+ print " that will be upgraded."
+ print
+ print " "+green("--columns")
+ print " Display the pretend output in a tabular form. Versions are"
+ print " aligned vertically."
+ print
+ print " "+green("--debug")+" ("+green("-d")+" short option)"
+ print " Tell emerge to run the ebuild command in --debug mode. In this"
+ print " mode, the bash build environment will run with the -x option,"
+	print "       causing it to print verbose debug information to stdout."
+	print "       --debug is great for finding bash syntax errors, as well as"
+	print "       providing very verbose information about the dependency and"
+	print "       build process."
+ print
+ print " "+green("--deep")+" ("+green("-D")+" short option)"
+ print " When used in conjunction with --update, this flag forces emerge"
+ print " to consider the entire dependency tree of packages, instead of"
+ print " checking only the immediate dependencies of the packages. As an"
+ print " example, this catches updates in libraries that are not directly"
+ print " listed in the dependencies of a package."
+ print
+ print " "+green("--emptytree")+" ("+green("-e")+" short option)"
+ print " Virtually tweaks the tree of installed packages to contain"
+ print " nothing. This is great to use together with --pretend. This makes"
+ print " it possible for developers to get a complete overview of the"
+ print " complete dependency tree of a certain package."
+ print
+ print " "+green("--fetchonly")+" ("+green("-f")+" short option)"
+ print " Instead of doing any package building, just perform fetches for"
+	print "       all packages (main package as well as all dependencies). When"
+	print "       used in combination with --pretend, all the SRC_URIs will be"
+	print "       displayed with multiple mirrors per line, one line per file."
+ print
+ print " "+green("--fetch-all-uri")
+ print " Same as --fetchonly except that all package files, including those"
+ print " not required to build the package, will be processed."
+ print
+ print " "+green("--getbinpkg")+" ("+green("-g")+" short option)"
+ print " Using the server and location defined in PORTAGE_BINHOST, portage"
+ print " will download the information from each binary file there and it"
+ print " will use that information to help build the dependency list. This"
+ print " option implies '-k'. (Use -gK for binary-only merging.)"
+ print
+ print " "+green("--getbinpkgonly")+" ("+green("-G")+" short option)"
+ print " This option is identical to -g, as above, except it will not use"
+ print " ANY information from the local machine. All binaries will be"
+ print " downloaded from the remote server without consulting packages"
+ print " existing in the packages directory."
+ print
+ print " "+green("--newuse")
+ print " Tells emerge to include installed packages where USE flags have "
+ print " changed since installation."
+ print
+ print " "+green("--noconfmem")
+ print " Portage keeps track of files that have been placed into"
+ print " CONFIG_PROTECT directories, and normally it will not merge the"
+ print " same file more than once, as that would become annoying. This"
+	print "       can cause problems when the user wants the file back after an"
+	print "       accidental deletion. With this option, files will always be"
+ print " merged to the live fs instead of silently dropped."
+ print
+ print " "+green("--nodeps")+" ("+green("-O")+" short option)"
+ print " Merge specified packages, but don't merge any dependencies."
+ print " Note that the build may fail if deps aren't satisfied."
+ print
+ print " "+green("--noreplace")+" ("+green("-n")+" short option)"
+ print " Skip the packages specified on the command-line that have"
+ print " already been installed. Without this option, any packages,"
+ print " ebuilds, or deps you specify on the command-line *will* cause"
+ print " Portage to remerge the package, even if it is already installed."
+ print " Note that Portage won't remerge dependencies by default."
+ print
+ print " "+green("--nospinner")
+ print " Disables the spinner regardless of terminal type."
+ print
+ print " "+green("--oneshot")
+ print " Emerge as normal, but don't add packages to the world profile."
+ print " This package will only be updated if it is depended upon by"
+ print " another package."
+ print
+ print " "+green("--onlydeps")+" ("+green("-o")+" short option)"
+ print " Only merge (or pretend to merge) the dependencies of the"
+ print " specified packages, not the packages themselves."
+ print
+ print " "+green("--pretend")+" ("+green("-p")+" short option)"
+ print " Instead of actually performing the merge, simply display what"
+ print " ebuilds and tbz2s *would* have been installed if --pretend"
+ print " weren't used. Using --pretend is strongly recommended before"
+ print " installing an unfamiliar package. In the printout, N = new,"
+ print " U = updating, R = replacing, F = fetch restricted, B = blocked"
+ print " by an already installed package, D = possible downgrading,"
+	print "       S = slotted install. --verbose causes the affected USE flags to"
+	print "       be printed out, accompanied by a '+' for enabled and a '-' for"
+	print "       disabled USE flags."
+ print
+ print " "+green("--quiet")+" ("+green("-q")+" short option)"
+ print " Effects vary, but the general outcome is a reduced or condensed"
+ print " output from portage's displays."
+ print
+ print " "+green("--resume")
+ print " Resumes the last merge operation. Can be treated just like a"
+	print "       regular merge, as --pretend and other options work alongside it."
+	print "       'emerge --resume' only returns an error on failure. When there is"
+	print "       nothing to do, it exits with a message and a success condition."
+ print
+ print " "+green("--searchdesc")+" ("+green("-S")+" short option)"
+	print "       Matches the search string against the description field as well"
+	print "       as the package's name. Take caution, as the descriptions are also"
+ print " matched as regular expressions."
+ print " emerge -S html"
+ print " emerge -S applet"
+ print " emerge -S 'perl.*module'"
+ print
+ print " "+green("--skipfirst")
+ print " This option is only valid in a resume situation. It removes the"
+ print " first package in the resume list so that a merge may continue in"
+ print " the presence of an uncorrectable or inconsequential error. This"
+ print " should only be used in cases where skipping the package will not"
+ print " result in failed dependencies."
+ print
+ print " "+green("--tree")+" ("+green("-t")+" short option)"
+ print " Shows the dependency tree using indentation for dependencies."
+ print " The packages are also listed in reverse merge order so that"
+ print " a package's dependencies follow the package. Only really useful"
+ print " in combination with --emptytree, --update or --deep."
+ print
+ print " "+green("--update")+" ("+green("-u")+" short option)"
+ print " Updates packages to the best version available, which may not"
+ print " always be the highest version number due to masking for testing"
+	print "       and development. This will also update direct dependencies,"
+	print "       which may not be what you want. In general, use this option"
+	print "       only in combination with the world or system target."
+ print
+ print " "+green("--usepkg")+" ("+green("-k")+" short option)"
+ print " Tell emerge to use binary packages (from $PKGDIR) if they are"
+ print " available, thus possibly avoiding some time-consuming compiles."
+ print " This option is useful for CD installs; you can export"
+ print " PKGDIR=/mnt/cdrom/packages and then use this option to have"
+ print " emerge \"pull\" binary packages from the CD in order to satisfy"
+ print " dependencies."
+ print
+ print " "+green("--usepkgonly")+" ("+green("-K")+" short option)"
+ print " Like --usepkg above, except this only allows the use of binary"
+ print " packages, and it will abort the emerge if the package is not"
+ print " available at the time of dependency calculation."
+ print
+ print " "+green("--verbose")+" ("+green("-v")+" short option)"
+ print " Effects vary, but the general outcome is an increased or expanded"
+ print " display of content in portage's displays."
+ print
+ print " "+green("--version")+" ("+green("-V")+" short option)"
+ print " Displays the currently installed version of portage along with"
+ print " other information useful for quick reference on a system. See"
+ print " "+bold("emerge info")+" for more advanced information."
+ print
+ elif myaction in ["rsync","sync"]:
+ print
+ print bold("Usage: ")+turquoise("emerge")+" "+turquoise("--sync")
+ print
+ print " 'emerge --sync' tells emerge to update the Portage tree as specified in"
+	print "   the SYNC variable found in /etc/make.conf. By default, SYNC instructs"
+ print " emerge to perform an rsync-style update with rsync.gentoo.org."
+ print
+ print " 'emerge-webrsync' exists as a helper app to emerge --sync, providing a"
+ print " method to receive the entire portage tree as a tarball that can be"
+ print " extracted and used. First time syncs would benefit greatly from this."
+ print
+ print " "+turquoise("WARNING:")
+ print " If using our rsync server, emerge will clean out all files that do not"
+ print " exist on it, including ones that you may have created. The exceptions"
+ print " to this are the distfiles, local and packages directories."
+ print
+ elif myaction=="system":
+ print
+ print bold("Usage: ")+turquoise("emerge")+" [ "+green("options")+" ] "+turquoise("system")
+ print
+ print " \"emerge system\" is the Portage system update command. When run, it"
+	print "   will scan the /etc/make.profile/packages file and determine what"
+ print " packages need to be installed so that your system meets the minimum"
+ print " requirements of your current system profile. Note that this doesn't"
+ print " necessarily bring your system up-to-date at all; instead, it just"
+ print " ensures that you have no missing parts. For example, if your system"
+ print " profile specifies that you should have sys-apps/iptables installed"
+ print " and you don't, then \"emerge system\" will install it (the most"
+ print " recent version that matches the profile spec) for you. It's always a"
+ print " good idea to do an \"emerge --pretend system\" before an \"emerge"
+ print " system\", just so you know what emerge is planning to do."
+ print
+ elif myaction=="config":
+ outstuff=green("Config file management support (preliminary)")+"""
+
+Portage has a special feature called "config file protection". The purpose of
+this feature is to prevent new package installs from clobbering existing
+configuration files. By default, config file protection is turned on for /etc
+and the KDE configuration dirs; more may be added in the future.
+
+When Portage installs a file into a protected directory tree like /etc, any
+existing files will not be overwritten. If a file of the same name already
+exists, Portage will change the name of the to-be-installed file from 'foo' to
+'._cfg0000_foo'. If '._cfg0000_foo' already exists, this name becomes
+'._cfg0001_foo', etc. In this way, existing files are not overwritten,
+allowing the administrator to manually merge the new config files and avoid any
+unexpected changes.
+
+In addition to protecting overwritten files, Portage will not delete any files
+from a protected directory when a package is unmerged. While this may be a
+little bit untidy, it does prevent potentially valuable config files from being
+deleted, which is of paramount importance.
+
+Protected directories are set using the CONFIG_PROTECT variable, normally
+defined in /etc/make.globals. Directory exceptions to the CONFIG_PROTECTed
+directories can be specified using the CONFIG_PROTECT_MASK variable. To find
+files that need to be updated in /etc, type:
+
+# find /etc -iname '._cfg????_*'
+
+You can disable this feature by setting CONFIG_PROTECT="-*" in /etc/make.conf.
+Then, Portage will mercilessly auto-update your config files. Alternatively,
+you can leave Config File Protection on but tell Portage that it can overwrite
+files in certain specific /etc subdirectories. For example, if you wanted
+Portage to automatically update your rc scripts and your wget configuration,
+but didn't want any other changes made without your explicit approval, you'd
+add this to /etc/make.conf:
+
+CONFIG_PROTECT_MASK="/etc/wget /etc/rc.d"
+
+etc-update is also available to aid in the merging of these files. It provides
+a vimdiff interactive merging setup and can auto-merge trivial changes.
+
+"""
+ print outstuff
+
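The '._cfg0000_foo' scheme described in the config help text above can be pictured with a short sketch. This is illustrative only and is not how Portage itself implements the scheme; the helper name is hypothetical.

# Illustrative sketch of the ._cfgXXXX_ naming scheme (not Portage's code).
import os

def next_protected_name(directory, filename):
    """Return the first unused ._cfgNNNN_<filename> in directory."""
    for n in range(10000):
        candidate = "._cfg%04d_%s" % (n, filename)
        if not os.path.exists(os.path.join(directory, candidate)):
            return candidate
    raise RuntimeError("no free ._cfgXXXX_ slot for %s" % filename)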
diff --git a/pym/getbinpkg.py b/pym/getbinpkg.py
new file mode 100644
index 000000000..7145d3adb
--- /dev/null
+++ b/pym/getbinpkg.py
@@ -0,0 +1,541 @@
+# getbinpkg.py -- Portage binary-package helper functions
+# Copyright 2003-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/getbinpkg.py,v 1.12.2.3 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: getbinpkg.py,v 1.12.2.3 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+from output import *
+import htmllib,HTMLParser,string,formatter,sys,os,xpak,time,tempfile,cPickle,base64
+
+try:
+ import ftplib
+except SystemExit, e:
+ raise
+except Exception, e:
+ sys.stderr.write(red("!!! CANNOT IMPORT FTPLIB: ")+str(e)+"\n")
+
+try:
+ import httplib
+except SystemExit, e:
+ raise
+except Exception, e:
+ sys.stderr.write(red("!!! CANNOT IMPORT HTTPLIB: ")+str(e)+"\n")
+
+def make_metadata_dict(data):
+ myid,myglob = data
+
+ mydict = {}
+ for x in xpak.getindex_mem(myid):
+ mydict[x] = xpak.getitem(data,x)
+
+ return mydict
+
+class ParseLinks(HTMLParser.HTMLParser):
+ """Parser class that overrides HTMLParser to grab all anchors from an html
+	page and provide suffix and prefix limiters"""
+ def __init__(self):
+ self.PL_anchors = []
+ HTMLParser.HTMLParser.__init__(self)
+
+ def get_anchors(self):
+ return self.PL_anchors
+
+ def get_anchors_by_prefix(self,prefix):
+ newlist = []
+ for x in self.PL_anchors:
+			if (len(x) >= len(prefix)) and (x[:len(prefix)] == prefix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def get_anchors_by_suffix(self,suffix):
+ newlist = []
+ for x in self.PL_anchors:
+ if (len(x) >= len(suffix)) and (x[-len(suffix):] == suffix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def handle_endtag(self,tag):
+ pass
+
+ def handle_starttag(self,tag,attrs):
+ if tag == "a":
+ for x in attrs:
+ if x[0] == 'href':
+ if x[1] not in self.PL_anchors:
+ self.PL_anchors.append(x[1])
+
+
+def create_conn(baseurl,conn=None):
+ """(baseurl,conn) --- Takes a protocol://site:port/address url, and an
+ optional connection. If connection is already active, it is passed on.
+ baseurl is reduced to address and is returned in tuple (conn,address)"""
+ parts = string.split(baseurl, "://", 1)
+ if len(parts) != 2:
+ raise ValueError, "Provided URL does not contain protocol identifier. '%s'" % baseurl
+ protocol,url_parts = parts
+ del parts
+ host,address = string.split(url_parts, "/", 1)
+ del url_parts
+ address = "/"+address
+
+ userpass_host = string.split(host, "@", 1)
+ if len(userpass_host) == 1:
+ host = userpass_host[0]
+ userpass = ["anonymous"]
+ else:
+ host = userpass_host[1]
+ userpass = string.split(userpass_host[0], ":")
+ del userpass_host
+
+ if len(userpass) > 2:
+ raise ValueError, "Unable to interpret username/password provided."
+ elif len(userpass) == 2:
+ username = userpass[0]
+ password = userpass[1]
+ elif len(userpass) == 1:
+ username = userpass[0]
+ password = None
+ del userpass
+
+ http_headers = {}
+ http_params = {}
+ if username and password:
+ http_headers = {
+ "Authorization": "Basic %s" %
+ string.replace(
+ base64.encodestring("%s:%s" % (username, password)),
+ "\012",
+ ""
+ ),
+ }
+
+ if not conn:
+ if protocol == "https":
+ conn = httplib.HTTPSConnection(host)
+ elif protocol == "http":
+ conn = httplib.HTTPConnection(host)
+ elif protocol == "ftp":
+ passive = 1
+ if(host[-1] == "*"):
+ passive = 0
+ host = host[:-1]
+ conn = ftplib.FTP(host)
+ if password:
+ conn.login(username,password)
+ else:
+ sys.stderr.write(yellow(" * No password provided for username")+" '"+str(username)+"'\n\n")
+ conn.login(username)
+ conn.set_pasv(passive)
+ conn.set_debuglevel(0)
+ else:
+ raise NotImplementedError, "%s is not a supported protocol." % protocol
+
+ return (conn,protocol,address, http_params, http_headers)
+
+def make_ftp_request(conn, address, rest=None, dest=None):
+ """(conn,address,rest) --- uses the conn object to request the data
+ from address and issuing a rest if it is passed."""
+ try:
+
+ if dest:
+ fstart_pos = dest.tell()
+
+ conn.voidcmd("TYPE I")
+ fsize = conn.size(address)
+
+ if (rest != None) and (rest < 0):
+ rest = fsize+int(rest)
+ if rest < 0:
+ rest = 0
+
+ if rest != None:
+ mysocket = conn.transfercmd("RETR "+str(address), rest)
+ else:
+ mysocket = conn.transfercmd("RETR "+str(address))
+
+ mydata = ""
+ while 1:
+ somedata = mysocket.recv(8192)
+ if somedata:
+ if dest:
+ dest.write(somedata)
+ else:
+ mydata = mydata + somedata
+ else:
+ break
+
+ if dest:
+			data_size = dest.tell() - fstart_pos
+ else:
+ data_size = len(mydata)
+
+ mysocket.close()
+ conn.voidresp()
+ conn.voidcmd("TYPE A")
+
+ return mydata,not (fsize==data_size),""
+
+ except ValueError, e:
+ return None,int(str(e)[:4]),str(e)
+
+
+def make_http_request(conn, address, params={}, headers={}, dest=None):
+ """(conn,address,params,headers) --- uses the conn object to request
+ the data from address, performing Location forwarding and using the
+ optional params and headers."""
+
+ rc = 0
+ response = None
+ while (rc == 0) or (rc == 301) or (rc == 302):
+ try:
+ if (rc != 0):
+ conn,ignore,ignore,ignore,ignore = create_conn(address)
+ conn.request("GET", address, params, headers)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ return None,None,"Server request failed: "+str(e)
+ response = conn.getresponse()
+ rc = response.status
+
+		# 301 and 302 mean the page has moved; follow the Location header.
+ if ((rc == 301) or (rc == 302)):
+ ignored_data = response.read()
+ del ignored_data
+ for x in string.split(str(response.msg), "\n"):
+ parts = string.split(x, ": ", 1)
+ if parts[0] == "Location":
+ if (rc == 301):
+ sys.stderr.write(red("Location has moved: ")+str(parts[1])+"\n")
+ if (rc == 302):
+ sys.stderr.write(red("Location has temporarily moved: ")+str(parts[1])+"\n")
+ address = parts[1]
+ break
+
+ if (rc != 200) and (rc != 206):
+ sys.stderr.write(str(response.msg)+"\n")
+ sys.stderr.write(response.read()+"\n")
+ sys.stderr.write("address: "+address+"\n")
+ return None,rc,"Server did not respond successfully ("+str(response.status)+": "+str(response.reason)+")"
+
+ if dest:
+ dest.write(response.read())
+ return "",0,""
+
+ return response.read(),0,""
+
+
+def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
+ myarray = []
+
+ if not (prefix and suffix):
+ match_both = 0
+
+ for x in array:
+ add_p = 0
+ if prefix and (len(x) >= len(prefix)) and (x[:len(prefix)] == prefix):
+ add_p = 1
+
+ if match_both:
+ if prefix and not add_p: # Require both, but don't have first one.
+ continue
+ else:
+ if add_p: # Only need one, and we have it.
+ myarray.append(x[:])
+ continue
+
+		if not allow_overlap: # Don't allow prefix and suffix to overlap
+ if len(x) >= (len(prefix)+len(suffix)):
+ y = x[len(prefix):]
+ else:
+ continue # Too short to match.
+ else:
+ y = x # Do whatever... We're overlapping.
+
+ if suffix and (len(x) >= len(suffix)) and (x[-len(suffix):] == suffix):
+ myarray.append(x) # It matches
+ else:
+ continue # Doesn't match.
+
+ return myarray
+
+
+
+def dir_get_list(baseurl,conn=None):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+	URL should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ listing = None
+ if protocol in ["http","https"]:
+ page,rc,msg = make_http_request(conn,address,params,headers)
+
+ if page:
+ parser = ParseLinks()
+ parser.feed(page)
+ del page
+ listing = parser.get_anchors()
+ else:
+ raise Exception, "Unable to get listing: %s %s" % (rc,msg)
+ elif protocol in ["ftp"]:
+ if address[-1] == '/':
+ olddir = conn.pwd()
+ conn.cwd(address)
+ listing = conn.nlst()
+ conn.cwd(olddir)
+ del olddir
+ else:
+ listing = conn.nlst(address)
+ else:
+ raise TypeError, "Unknown protocol. '%s'" % protocol
+
+ if not keepconnection:
+ conn.close()
+
+ return listing
+
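+# Illustrative call (hypothetical URL): an HTTP(S) listing is scraped from
+# the anchors of the returned index page via ParseLinks, while FTP uses NLST:
+#
+#   files = dir_get_list("http://binhost.example/packages/All/")
+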
+def file_get_metadata(baseurl,conn=None, chunk_size=3000):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+	URL should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ if protocol in ["http","https"]:
+ headers["Range"] = "bytes=-"+str(chunk_size)
+ data,rc,msg = make_http_request(conn, address, params, headers)
+ elif protocol in ["ftp"]:
+ data,rc,msg = make_ftp_request(conn, address, -chunk_size)
+ else:
+ raise TypeError, "Unknown protocol. '%s'" % protocol
+
+ if data:
+ xpaksize = xpak.decodeint(data[-8:-4])
+ if (xpaksize+8) > chunk_size:
+ myid = file_get_metadata(baseurl, conn, (xpaksize+8))
+ if not keepconnection:
+ conn.close()
+ return myid
+ else:
+ xpak_data = data[len(data)-(xpaksize+8):-8]
+ del data
+
+ myid = xpak.xsplit_mem(xpak_data)
+ if not myid:
+ myid = None,None
+ del xpak_data
+ else:
+ myid = None,None
+
+ if not keepconnection:
+ conn.close()
+
+ return myid
+
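+# Sketch of the tail-read protocol used above (added commentary): the xpak
+# segment sits at the end of the .tbz2, with its encoded length stored in
+# bytes [-8:-4] of the file (the last four bytes form a trailer). If the
+# default chunk_size did not cover the whole segment, the function recurses
+# with chunk_size = xpaksize + 8 to fetch exactly the segment plus trailer.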
+
+def file_get(baseurl,dest,conn=None,fcmd=None):
+	"""(baseurl,dest[,conn][,fcmd]) -- Takes a base url to connect to and read from.
+	URL should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
+
+ if not fcmd:
+ return file_get_lib(baseurl,dest,conn)
+
+ fcmd = string.replace(fcmd, "${DISTDIR}", dest)
+ fcmd = string.replace(fcmd, "${URI}", baseurl)
+ fcmd = string.replace(fcmd, "${FILE}", os.path.basename(baseurl))
+ mysplit = string.split(fcmd)
+ mycmd = mysplit[0]
+ myargs = [os.path.basename(mycmd)]+mysplit[1:]
+ mypid=os.fork()
+ if mypid == 0:
+ os.execv(mycmd,myargs)
+ sys.stderr.write("!!! Failed to spawn fetcher.\n")
+ sys.exit(1)
+ retval=os.waitpid(mypid,0)[1]
+ if (retval & 0xff) == 0:
+ retval = retval >> 8
+ else:
+ sys.stderr.write("Spawned processes caught a signal.\n")
+ sys.exit(1)
+ if retval != 0:
+ sys.stderr.write("Fetcher exited with a failure condition.\n")
+ return 0
+ return 1
+
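+# Example fcmd template (hypothetical command string): the ${DISTDIR},
+# ${URI} and ${FILE} tokens are substituted textually before the fetcher
+# is forked, so a wget-style command could look like:
+#
+#   file_get("http://host/All/foo-1.0.tbz2", "/usr/portage/distfiles",
+#            fcmd="/usr/bin/wget -P ${DISTDIR} ${URI}")
+#
+# Returns 1 on success and 0 if the spawned fetcher exited nonzero.
+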
+def file_get_lib(baseurl,dest,conn=None):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+	URL should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+	sys.stderr.write("Fetching '"+str(os.path.basename(address))+"'\n")
+ if protocol in ["http","https"]:
+ data,rc,msg = make_http_request(conn, address, params, headers, dest=dest)
+ elif protocol in ["ftp"]:
+ data,rc,msg = make_ftp_request(conn, address, dest=dest)
+ else:
+ raise TypeError, "Unknown protocol. '%s'" % protocol
+
+ if not keepconnection:
+ conn.close()
+
+ return rc
+
+
+def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
+	"""(baseurl,conn,chunk_size,verbose) -- Fetch metadata for every .tbz2
+	file under baseurl, preferring a cached metadata.idx pickle and falling
+	back to per-file xpak tail reads; returns the metadata dict for baseurl."""
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ if makepickle == None:
+ makepickle = "/var/cache/edb/metadata.idx.most_recent"
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ filedict = {}
+
+ try:
+ metadatafile = open("/var/cache/edb/remote_metadata.pickle")
+ metadata = cPickle.load(metadatafile)
+ sys.stderr.write("Loaded metadata pickle.\n")
+ metadatafile.close()
+ except SystemExit, e:
+ raise
+ except:
+ metadata = {}
+ if not metadata.has_key(baseurl):
+ metadata[baseurl]={}
+ if not metadata[baseurl].has_key("indexname"):
+ metadata[baseurl]["indexname"]=""
+ if not metadata[baseurl].has_key("timestamp"):
+ metadata[baseurl]["timestamp"]=0
+	if not metadata[baseurl].has_key("modified"):
+		metadata[baseurl]["modified"]=0
+ if not metadata[baseurl].has_key("data"):
+ metadata[baseurl]["data"]={}
+
+ filelist = dir_get_list(baseurl, conn)
+ tbz2list = match_in_array(filelist, suffix=".tbz2")
+ metalist = match_in_array(filelist, prefix="metadata.idx")
+ del filelist
+
+ # Determine if our metadata file is current.
+ metalist.sort()
+ metalist.reverse() # makes the order new-to-old.
+ havecache=0
+ for mfile in metalist:
+ if usingcache and \
+ ((metadata[baseurl]["indexname"] != mfile) or \
+ (metadata[baseurl]["timestamp"] < int(time.time()-(60*60*24)))):
+ # Try to download new cache until we succeed on one.
+ data=""
+ for trynum in [1,2,3]:
+ mytempfile = tempfile.TemporaryFile()
+ try:
+ file_get(baseurl+"/"+mfile, mytempfile, conn)
+ if mytempfile.tell() > len(data):
+ mytempfile.seek(0)
+ data = mytempfile.read()
+ except ValueError, e:
+ sys.stderr.write("--- "+str(e)+"\n")
+ if trynum < 3:
+ sys.stderr.write("Retrying...\n")
+ mytempfile.close()
+ continue
+ if match_in_array([mfile],suffix=".gz"):
+ sys.stderr.write("gzip'd\n")
+ try:
+ import gzip
+ mytempfile.seek(0)
+ gzindex = gzip.GzipFile(mfile[:-3],'rb',9,mytempfile)
+ data = gzindex.read()
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ mytempfile.close()
+ sys.stderr.write("!!! Failed to use gzip: "+str(e)+"\n")
+ mytempfile.close()
+ try:
+ metadata[baseurl]["data"] = cPickle.loads(data)
+ del data
+ metadata[baseurl]["indexname"] = mfile
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadata[baseurl]["modified"] = 0 # It's not, right after download.
+ sys.stderr.write("Pickle loaded.\n")
+ break
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ sys.stderr.write("!!! Failed to read data from index: "+str(mfile)+"\n")
+ sys.stderr.write("!!! "+str(e)+"\n")
+ try:
+ metadatafile = open("/var/cache/edb/remote_metadata.pickle", "w+")
+ cPickle.dump(metadata,metadatafile)
+ metadatafile.close()
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ sys.stderr.write("!!! Failed to write binary metadata to disk!\n")
+ sys.stderr.write("!!! "+str(e)+"\n")
+ break
+ # We may have metadata... now we run through the tbz2 list and check.
+ sys.stderr.write(yellow("cache miss: 'x'")+" --- "+green("cache hit: 'o'")+"\n")
+ for x in tbz2list:
+ x = os.path.basename(x)
+		if not metadata[baseurl]["data"].has_key(x):
+ sys.stderr.write(yellow("x"))
+ metadata[baseurl]["modified"] = 1
+ myid = file_get_metadata(baseurl+"/"+x, conn, chunk_size)
+
+ if myid[0]:
+ metadata[baseurl]["data"][x] = make_metadata_dict(myid)
+ elif verbose:
+ sys.stderr.write(red("!!! Failed to retrieve metadata on: ")+str(x)+"\n")
+ else:
+ sys.stderr.write(green("o"))
+ sys.stderr.write("\n")
+
+ try:
+ if metadata[baseurl].has_key("modified") and metadata[baseurl]["modified"]:
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadatafile = open("/var/cache/edb/remote_metadata.pickle", "w+")
+ cPickle.dump(metadata,metadatafile)
+ metadatafile.close()
+ if makepickle:
+ metadatafile = open(makepickle, "w")
+ cPickle.dump(metadata[baseurl]["data"],metadatafile)
+ metadatafile.close()
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ sys.stderr.write("!!! Failed to write binary metadata to disk!\n")
+ sys.stderr.write("!!! "+str(e)+"\n")
+
+ if not keepconnection:
+ conn.close()
+
+ return metadata[baseurl]["data"]
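+
+# Overall flow (added summary, not in the original): dir_get_list()
+# enumerates the remote directory, the newest metadata.idx* file is used
+# to warm the local pickle cache, and every .tbz2 missing from the cache
+# ('x' above marks a miss, 'o' a hit) has its xpak tail fetched
+# individually through file_get_metadata().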
diff --git a/pym/output.py b/pym/output.py
new file mode 100644
index 000000000..ddb85c1a0
--- /dev/null
+++ b/pym/output.py
@@ -0,0 +1,167 @@
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/output.py,v 1.24.2.4 2005/04/17 09:01:55 jstubbs Exp $
+cvs_id_string="$Id: output.py,v 1.24.2.4 2005/04/17 09:01:55 jstubbs Exp $"[5:-2]
+
+import os,sys,re
+
+havecolor=1
+dotitles=1
+
+esc_seq = "\x1b["
+
+g_attr = {}
+g_attr["normal"] = 0
+
+g_attr["bold"] = 1
+g_attr["faint"] = 2
+g_attr["standout"] = 3
+g_attr["underline"] = 4
+g_attr["blink"] = 5
+g_attr["overline"] = 6 # Why is overline actually useful?
+g_attr["reverse"] = 7
+g_attr["invisible"] = 8
+
+g_attr["no-attr"] = 22
+g_attr["no-standout"] = 23
+g_attr["no-underline"] = 24
+g_attr["no-blink"] = 25
+g_attr["no-overline"] = 26
+g_attr["no-reverse"] = 27
+# 28 isn't defined?
+# 29 isn't defined?
+g_attr["black"] = 30
+g_attr["red"] = 31
+g_attr["green"] = 32
+g_attr["yellow"] = 33
+g_attr["blue"] = 34
+g_attr["magenta"] = 35
+g_attr["cyan"] = 36
+g_attr["white"] = 37
+# 38 isn't defined?
+g_attr["default"] = 39
+g_attr["bg_black"] = 40
+g_attr["bg_red"] = 41
+g_attr["bg_green"] = 42
+g_attr["bg_yellow"] = 43
+g_attr["bg_blue"] = 44
+g_attr["bg_magenta"] = 45
+g_attr["bg_cyan"] = 46
+g_attr["bg_white"] = 47
+g_attr["bg_default"] = 49
+
+
+# make_seq("blue", "black", "normal")
+def color(fg, bg="default", attr=["normal"]):
+ mystr = esc_seq[:] + "%02d" % g_attr[fg]
+ for x in [bg]+attr:
+ mystr += ";%02d" % g_attr[x]
+ return mystr+"m"
+
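+# For instance (illustrative), color("green", "bg_black", ["bold"]) yields
+# "\x1b[32;40;01m": the foreground code first, then each background and
+# attribute code as a two-digit ";"-separated field.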
+
+
+codes={}
+codes["reset"] = esc_seq + "39;49;00m"
+
+codes["bold"] = esc_seq + "01m"
+codes["faint"] = esc_seq + "02m"
+codes["standout"] = esc_seq + "03m"
+codes["underline"] = esc_seq + "04m"
+codes["blink"] = esc_seq + "05m"
+codes["overline"] = esc_seq + "06m" # Who made this up? Seriously.
+
+codes["teal"] = esc_seq + "36m"
+codes["turquoise"] = esc_seq + "36;01m"
+
+codes["fuchsia"] = esc_seq + "35;01m"
+codes["purple"] = esc_seq + "35m"
+
+codes["blue"] = esc_seq + "34;01m"
+codes["darkblue"] = esc_seq + "34m"
+
+codes["green"] = esc_seq + "32;01m"
+codes["darkgreen"] = esc_seq + "32m"
+
+codes["yellow"] = esc_seq + "33;01m"
+codes["brown"] = esc_seq + "33m"
+
+codes["red"] = esc_seq + "31;01m"
+codes["darkred"] = esc_seq + "31m"
+
+def nc_len(mystr):
+	tmp = re.sub(re.escape(esc_seq) + "[^m]+m", "", mystr)
+ return len(tmp)
+
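+# e.g. (illustrative) nc_len(red("ok")) == 2: the escape sequences added
+# by the color helpers below are stripped before measuring.
+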
+def xtermTitle(mystr):
+ if havecolor and dotitles and os.environ.has_key("TERM") and sys.stderr.isatty():
+ myt=os.environ["TERM"]
+ legal_terms = ["xterm","Eterm","aterm","rxvt","screen","kterm","rxvt-unicode"]
+ for term in legal_terms:
+ if myt.startswith(term):
+ sys.stderr.write("\x1b]2;"+str(mystr)+"\x07")
+ sys.stderr.flush()
+ break
+
+def xtermTitleReset():
+ if havecolor and dotitles and os.environ.has_key("TERM"):
+		xtermTitle(os.environ["TERM"])
+
+
+def notitles():
+	"turn off title setting"
+	global dotitles
+	dotitles=0
+
+def nocolor():
+	"turn off colorization"
+	global havecolor
+	havecolor=0
+ for x in codes.keys():
+ codes[x]=""
+
+def resetColor():
+ return codes["reset"]
+
+def ctext(color,text):
+	return codes[color]+text+codes["reset"]
+
+def bold(text):
+ return codes["bold"]+text+codes["reset"]
+def white(text):
+ return bold(text)
+
+def teal(text):
+ return codes["teal"]+text+codes["reset"]
+def turquoise(text):
+ return codes["turquoise"]+text+codes["reset"]
+def darkteal(text):
+ return turquoise(text)
+
+def fuscia(text): # Don't use this one. It's spelled wrong!
+ return codes["fuchsia"]+text+codes["reset"]
+def fuchsia(text):
+ return codes["fuchsia"]+text+codes["reset"]
+def purple(text):
+ return codes["purple"]+text+codes["reset"]
+
+def blue(text):
+ return codes["blue"]+text+codes["reset"]
+def darkblue(text):
+ return codes["darkblue"]+text+codes["reset"]
+
+def green(text):
+ return codes["green"]+text+codes["reset"]
+def darkgreen(text):
+ return codes["darkgreen"]+text+codes["reset"]
+
+def yellow(text):
+ return codes["yellow"]+text+codes["reset"]
+def brown(text):
+ return codes["brown"]+text+codes["reset"]
+def darkyellow(text):
+ return brown(text)
+
+def red(text):
+ return codes["red"]+text+codes["reset"]
+def darkred(text):
+ return codes["darkred"]+text+codes["reset"]
+
diff --git a/pym/portage.py b/pym/portage.py
new file mode 100644
index 000000000..a6702344b
--- /dev/null
+++ b/pym/portage.py
@@ -0,0 +1,7452 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage.py,v 1.524.2.76 2005/05/29 12:40:08 jstubbs Exp $
+cvs_id_string="$Id: portage.py,v 1.524.2.76 2005/05/29 12:40:08 jstubbs Exp $"[5:-2]
+
+VERSION="$Revision: 1.524.2.76 $"[11:-2] + "-cvs"
+
+# ===========================================================================
+# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
+# ===========================================================================
+
+try:
+ import sys
+except SystemExit, e:
+ raise
+except:
+ print "Failed to import sys! Something is _VERY_ wrong with python."
+ raise SystemExit, 127
+
+try:
+ import os,string,types,atexit,signal,fcntl
+ import time,cPickle,traceback,copy
+ import re,pwd,grp,commands
+ import shlex,shutil
+
+ import stat
+ from time import sleep
+ from random import shuffle
+except SystemExit, e:
+ raise
+except Exception, e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
+ sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
+	sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
+
+ sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
+ sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
+
+ sys.stderr.write(" "+str(e)+"\n\n");
+ sys.exit(127)
+except:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
+ sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
+	sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
+
+ sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
+ sys.stderr.write("!!! gone wrong. The exception was non-standard and we were unable to catch it.\n\n")
+ sys.exit(127)
+
+try:
+ # XXX: This should get renamed to bsd_chflags, I think.
+ import chflags
+ bsd_chflags = chflags
+except SystemExit, e:
+ raise
+except:
+ # XXX: This should get renamed to bsd_chflags, I think.
+ bsd_chflags = None
+
+try:
+ import cvstree
+ import xpak
+ import getbinpkg
+ import portage_dep
+
+ # XXX: This needs to get cleaned up.
+ import output
+ from output import blue, bold, brown, darkblue, darkgreen, darkred, darkteal, \
+ darkyellow, fuchsia, fuscia, green, purple, red, teal, turquoise, white, \
+ xtermTitle, xtermTitleReset, yellow
+
+ import portage_const
+ from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
+ USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
+ PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
+ EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
+ MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
+ DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
+ INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, SANDBOX_PIDS_FILE, CONFIG_MEMORY_FILE,\
+ INCREMENTALS, STICKIES
+
+ from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
+ portage_uid, portage_gid
+
+ import portage_util
+ from portage_util import grab_multiple, grabdict, grabdict_package, grabfile, grabfile_package, \
+ grabints, map_dictlist_vals, pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
+ unique_array, varexpand, writedict, writeints, writemsg, getconfig
+ import portage_exception
+ import portage_gpg
+ import portage_locks
+ import portage_exec
+ from portage_locks import unlockfile,unlockdir,lockfile,lockdir
+ import portage_checksum
+ from portage_checksum import perform_md5,perform_checksum,prelink_capable
+ from portage_localization import _
+except SystemExit, e:
+ raise
+except Exception, e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
+ sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
+ sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
+ sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
+ sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
+ sys.stderr.write("!!! a recovery of portage.\n")
+
+ sys.stderr.write(" "+str(e)+"\n\n")
+ sys.exit(127)
+
+
+# ===========================================================================
+# END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
+# ===========================================================================
+
+
+def exithandler(signum,frame):
+ """Handles ^C interrupts in a sane manner"""
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+	portageexit()
+	print "Exiting due to signal"
+	# pid 0 == send the signal to *everybody* in our process group
+	os.kill(0,signum)
+ sys.exit(1)
+
+signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+signal.signal(signal.SIGINT, exithandler)
+signal.signal(signal.SIGTERM, exithandler)
+signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+def load_mod(name):
+ modname = string.join(string.split(name,".")[:-1],".")
+ mod = __import__(modname)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
+ for x in key_order:
+ if top_dict.has_key(x) and top_dict[x].has_key(key):
+ if FullCopy:
+ return copy.deepcopy(top_dict[x][key])
+ else:
+ return top_dict[x][key]
+ if EmptyOnError:
+ return ""
+ else:
+ raise KeyError, "Key not found in list; '%s'" % key
+
+def getcwd():
+ "this fixes situations where the current directory doesn't exist"
+ try:
+ return os.getcwd()
+ except SystemExit, e:
+ raise
+ except:
+ os.chdir("/")
+ return "/"
+getcwd()
+
+def abssymlink(symlink):
+	"Reads a symlink, resolving it relative to its own directory, and returns the absolute path."
+ mylink=os.readlink(symlink)
+ if mylink[0] != '/':
+ mydir=os.path.dirname(symlink)
+ mylink=mydir+"/"+mylink
+ return os.path.normpath(mylink)
+
+def suffix_array(array,suffix,doblanks=1):
+ """Appends a given suffix to each element in an Array/List/Tuple.
+ Returns a List."""
+ if type(array) not in [types.ListType, types.TupleType]:
+ raise TypeError, "List or Tuple expected. Got %s" % type(array)
+ newarray=[]
+ for x in array:
+ if x or doblanks:
+ newarray.append(x + suffix)
+ else:
+ newarray.append(x)
+ return newarray
+
+def prefix_array(array,prefix,doblanks=1):
+ """Prepends a given prefix to each element in an Array/List/Tuple.
+ Returns a List."""
+ if type(array) not in [types.ListType, types.TupleType]:
+ raise TypeError, "List or Tuple expected. Got %s" % type(array)
+ newarray=[]
+ for x in array:
+ if x or doblanks:
+ newarray.append(prefix + x)
+ else:
+ newarray.append(x)
+ return newarray
+
+def normalize_path(mypath):
+ newpath = os.path.normpath(mypath)
+ if len(newpath) > 1:
+ if newpath[:2] == "//":
+ newpath = newpath[1:]
+ return newpath
+
+dircache = {}
+cacheHit=0
+cacheMiss=0
+cacheStale=0
+def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
+ global cacheHit,cacheMiss,cacheStale
+ mypath = normalize_path(my_original_path)
+ if dircache.has_key(mypath):
+ cacheHit += 1
+ cached_mtime, list, ftype = dircache[mypath]
+ else:
+ cacheMiss += 1
+ cached_mtime, list, ftype = -1, [], []
+ try:
+ pathstat = os.stat(mypath)
+ if stat.S_ISDIR(pathstat[stat.ST_MODE]):
+ mtime = pathstat[stat.ST_MTIME]
+ else:
+ raise Exception
+ except SystemExit, e:
+ raise
+ except:
+ if EmptyOnError:
+ return [], []
+ return None, None
+ if mtime != cached_mtime:
+ if dircache.has_key(mypath):
+ cacheStale += 1
+ list = os.listdir(mypath)
+ ftype = []
+ for x in list:
+ try:
+ if followSymlinks:
+ pathstat = os.stat(mypath+"/"+x)
+ else:
+ pathstat = os.lstat(mypath+"/"+x)
+
+ if stat.S_ISREG(pathstat[stat.ST_MODE]):
+ ftype.append(0)
+ elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
+ ftype.append(1)
+ elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
+ ftype.append(2)
+ else:
+ ftype.append(3)
+ except SystemExit, e:
+ raise
+ except:
+ ftype.append(3)
+ dircache[mypath] = mtime, list, ftype
+
+ ret_list = []
+ ret_ftype = []
+ for x in range(0, len(list)):
+ if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
+ ret_list.append(list[x])
+ ret_ftype.append(ftype[x])
+ elif (list[x] not in ignorelist):
+ ret_list.append(list[x])
+ ret_ftype.append(ftype[x])
+
+ writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
+ return ret_list, ret_ftype
+
+
+def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
+ EmptyOnError=False):
+
+ list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
+
+ if list is None:
+ list=[]
+ if ftype is None:
+ ftype=[]
+
+ if not filesonly and not recursive:
+ return list
+
+ if recursive:
+ x=0
+ while x<len(ftype):
+ if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
+ l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
+ followSymlinks)
+
+ l=l[:]
+ for y in range(0,len(l)):
+ l[y]=list[x]+"/"+l[y]
+ list=list+l
+ ftype=ftype+f
+ x+=1
+ if filesonly:
+ rlist=[]
+ for x in range(0,len(ftype)):
+ if ftype[x]==0:
+ rlist=rlist+[list[x]]
+ else:
+ rlist=list
+
+ return rlist
+
+starttime=long(time.time())
+features=[]
+
+def tokenize(mystring):
+ """breaks a string like 'foo? (bar) oni? (blah (blah))'
+ into embedded lists; returns None on paren mismatch"""
+
+	# This function is obsolete.
+	# Use portage_dep.paren_reduce instead.
+
+ newtokens=[]
+ curlist=newtokens
+ prevlists=[]
+ level=0
+ accum=""
+ for x in mystring:
+ if x=="(":
+ if accum:
+ curlist.append(accum)
+ accum=""
+ prevlists.append(curlist)
+ curlist=[]
+ level=level+1
+ elif x==")":
+ if accum:
+ curlist.append(accum)
+ accum=""
+ if level==0:
+				writemsg("!!! tokenizer: Unmatched right parenthesis in:\n'"+str(mystring)+"'\n")
+ return None
+ newlist=curlist
+ curlist=prevlists.pop()
+ curlist.append(newlist)
+ level=level-1
+ elif x in string.whitespace:
+ if accum:
+ curlist.append(accum)
+ accum=""
+ else:
+ accum=accum+x
+ if accum:
+ curlist.append(accum)
+ if (level!=0):
+ writemsg("!!! tokenizer: Exiting with unterminated parenthesis in:\n'"+str(mystring)+"'\n")
+ return None
+ return newtokens
+
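+# Example (illustrative):
+#   tokenize("foo? (bar) oni? (blah (blah))")
+#     ==> ['foo?', ['bar'], 'oni?', ['blah', ['blah']]]
+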
+def flatten(mytokens):
+	"""turns a nested list such as [1,[2,3]] into
+	a flat [1,2,3] list and returns it."""
+ newlist=[]
+ for x in mytokens:
+ if type(x)==types.ListType:
+ newlist.extend(flatten(x))
+ else:
+ newlist.append(x)
+ return newlist
+
+#beautiful directed graph object
+
+class digraph:
+ def __init__(self):
+ self.dict={}
+ #okeys = keys, in order they were added (to optimize firstzero() ordering)
+ self.okeys=[]
+
+ def addnode(self,mykey,myparent):
+ if not self.dict.has_key(mykey):
+ self.okeys.append(mykey)
+ if myparent==None:
+ self.dict[mykey]=[0,[]]
+ else:
+ self.dict[mykey]=[0,[myparent]]
+ self.dict[myparent][0]=self.dict[myparent][0]+1
+ return
+ if myparent and (not myparent in self.dict[mykey][1]):
+ self.dict[mykey][1].append(myparent)
+ self.dict[myparent][0]=self.dict[myparent][0]+1
+
+ def delnode(self,mykey):
+ if not self.dict.has_key(mykey):
+ return
+ for x in self.dict[mykey][1]:
+ self.dict[x][0]=self.dict[x][0]-1
+ del self.dict[mykey]
+ while 1:
+ try:
+ self.okeys.remove(mykey)
+ except ValueError:
+ break
+
+ def allnodes(self):
+ "returns all nodes in the dictionary"
+ return self.dict.keys()
+
+ def firstzero(self):
+		"returns the first node with zero references, or None if no such node exists"
+ for x in self.okeys:
+ if self.dict[x][0]==0:
+ return x
+ return None
+
+ def depth(self, mykey):
+ depth=0
+ while (self.dict[mykey][1]):
+ depth=depth+1
+ mykey=self.dict[mykey][1][0]
+ return depth
+
+ def allzeros(self):
+		"returns all non-blocker nodes with zero references (an empty list if none)"
+ zerolist = []
+ for x in self.dict.keys():
+ mys = string.split(x)
+ if mys[0] != "blocks" and self.dict[x][0]==0:
+ zerolist.append(x)
+ return zerolist
+
+	def hasallzeros(self):
+		"returns 1 if every node has zero references, else 0"
+		for x in self.dict.keys():
+			if self.dict[x][0]!=0:
+				return 0
+		return 1
+
+ def empty(self):
+ if len(self.dict)==0:
+ return 1
+ return 0
+
+ def hasnode(self,mynode):
+ return self.dict.has_key(mynode)
+
+ def copy(self):
+ mygraph=digraph()
+ for x in self.dict.keys():
+ mygraph.dict[x]=self.dict[x][:]
+ mygraph.okeys=self.okeys[:]
+ return mygraph
+
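+# Usage sketch (illustrative): a node's counter tracks how many other
+# nodes name it as their parent; firstzero() therefore yields a node
+# nothing else depends on:
+#
+#   g = digraph()
+#   g.addnode("a", None)
+#   g.addnode("b", "a")   # bumps "a"'s reference count to 1
+#   g.firstzero()         # ==> "b"
+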
+# valid end of version components; integers specify offset from release version
+# pre=prerelease, p=patchlevel (should always be followed by an int), rc=release candidate
+# all but _p (where it is required) can be followed by an optional trailing integer
+
+endversion={"pre":-2,"p":0,"alpha":-4,"beta":-3,"rc":-1}
+# as there's no reliable way to set {}.keys() order
+# endversion_keys will be used instead of endversion.keys()
+# to have fixed search order, so that "pre" is checked
+# before "p"
+endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
+
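+# Under these offsets a hypothetical 1.0 release sorts as:
+#   1.0_alpha < 1.0_beta < 1.0_pre < 1.0_rc < 1.0 < 1.0_p1
+# with _p distinguished from the plain release by its required integer.
+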
+#parse /etc/env.d and generate /etc/profile.env
+
+def env_update(makelinks=1):
+ global root
+ if not os.path.exists(root+"etc/env.d"):
+ prevmask=os.umask(0)
+ os.makedirs(root+"etc/env.d",0755)
+ os.umask(prevmask)
+ fns=listdir(root+"etc/env.d",EmptyOnError=1)
+ fns.sort()
+ pos=0
+ while (pos<len(fns)):
+ if len(fns[pos])<=2:
+ del fns[pos]
+ continue
+ if (fns[pos][0] not in string.digits) or (fns[pos][1] not in string.digits):
+ del fns[pos]
+ continue
+ pos=pos+1
+
+ specials={
+ "KDEDIRS":[],"PATH":[],"CLASSPATH":[],"LDPATH":[],"MANPATH":[],
+ "INFODIR":[],"INFOPATH":[],"ROOTPATH":[],"CONFIG_PROTECT":[],
+ "CONFIG_PROTECT_MASK":[],"PRELINK_PATH":[],"PRELINK_PATH_MASK":[],
+ "PYTHONPATH":[], "ADA_INCLUDE_PATH":[], "ADA_OBJECTS_PATH":[]
+ }
+ colon_separated = [
+ "ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
+ "LDPATH", "MANPATH",
+ "PATH", "PRELINK_PATH",
+ "PRELINK_PATH_MASK", "PYTHONPATH"
+ ]
+
+ env={}
+
+ for x in fns:
+ # don't process backup files
+ if x[-1]=='~' or x[-4:]==".bak":
+ continue
+ myconfig=getconfig(root+"etc/env.d/"+x)
+ if myconfig==None:
+ writemsg("!!! Parsing error in "+str(root)+"etc/env.d/"+str(x)+"\n")
+ #parse error
+ continue
+ # process PATH, CLASSPATH, LDPATH
+ for myspec in specials.keys():
+ if myconfig.has_key(myspec):
+ if myspec in colon_separated:
+ specials[myspec].extend(myconfig[myspec].split(":"))
+ else:
+ specials[myspec].append(myconfig[myspec])
+ del myconfig[myspec]
+ # process all other variables
+ for myenv in myconfig.keys():
+ env[myenv]=myconfig[myenv]
+
+ if os.path.exists(root+"etc/ld.so.conf"):
+ myld=open(root+"etc/ld.so.conf")
+ myldlines=myld.readlines()
+ myld.close()
+ oldld=[]
+ for x in myldlines:
+ #each line has at least one char (a newline)
+ if x[0]=="#":
+ continue
+ oldld.append(x[:-1])
+ # os.rename(root+"etc/ld.so.conf",root+"etc/ld.so.conf.bak")
+ # Where is the new ld.so.conf generated? (achim)
+ else:
+ oldld=None
+
+ ld_cache_update=False
+ if os.environ.has_key("PORTAGE_CALLER") and \
+ os.environ["PORTAGE_CALLER"] == "env-update":
+ ld_cache_update = True
+
+ newld=specials["LDPATH"]
+ if (oldld!=newld):
+ #ld.so.conf needs updating and ldconfig needs to be run
+ myfd=open(root+"etc/ld.so.conf","w")
+ myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
+ myfd.write("# contents of /etc/env.d directory\n")
+ for x in specials["LDPATH"]:
+ myfd.write(x+"\n")
+ myfd.close()
+ ld_cache_update=True
+
+ # Update prelink.conf if we are prelink-enabled
+ if prelink_capable:
+ newprelink=open(root+"etc/prelink.conf","w")
+ newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
+ newprelink.write("# contents of /etc/env.d directory\n")
+
+ for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
+ newprelink.write("-l "+x+"\n");
+ for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
+ if not x:
+ continue
+ if x[-1]!='/':
+ x=x+"/"
+ plmasked=0
+ for y in specials["PRELINK_PATH_MASK"]:
+ if not y:
+ continue
+ if y[-1]!='/':
+ y=y+"/"
+ if y==x[0:len(y)]:
+ plmasked=1
+ break
+ if not plmasked:
+ newprelink.write("-h "+x+"\n")
+ for x in specials["PRELINK_PATH_MASK"]:
+ newprelink.write("-b "+x+"\n")
+ newprelink.close()
+
+ if not mtimedb.has_key("ldpath"):
+ mtimedb["ldpath"]={}
+
+ for x in specials["LDPATH"]+['/usr/lib','/lib']:
+ try:
+ newldpathtime=os.stat(x)[stat.ST_MTIME]
+ except SystemExit, e:
+ raise
+ except:
+ newldpathtime=0
+ if mtimedb["ldpath"].has_key(x):
+ if mtimedb["ldpath"][x]==newldpathtime:
+ pass
+ else:
+ mtimedb["ldpath"][x]=newldpathtime
+ ld_cache_update=True
+ else:
+ mtimedb["ldpath"][x]=newldpathtime
+ ld_cache_update=True
+
+ # ldconfig has very different behaviour between FreeBSD and Linux
+ if ostype=="Linux" or ostype.lower().endswith("gnu"):
+ if (ld_cache_update or makelinks):
+ # We can't update links if we haven't cleaned other versions first, as
+ # an older package installed ON TOP of a newer version will cause ldconfig
+ # to overwrite the symlinks we just made. -X means no links. After 'clean'
+ # we can safely create links.
+ writemsg(">>> Regenerating "+str(root)+"etc/ld.so.cache...\n")
+ if makelinks:
+ commands.getstatusoutput("cd / ; /sbin/ldconfig -r "+root)
+ else:
+ commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r "+root)
+ elif ostype == "FreeBSD":
+ if (ld_cache_update):
+ writemsg(">>> Regenerating "+str(root)+"var/run/ld-elf.so.hints...\n")
+ commands.getstatusoutput("cd / ; /sbin/ldconfig -elf -f "+str(root)+"var/run/ld-elf.so.hints "+str(root)+"etc/ld.so.conf")
+
+ del specials["LDPATH"]
+
+ penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
+ penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
+ cenvnotice = penvnotice[:]
+ penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
+ cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
+
+ #create /etc/profile.env for bash support
+ outfile=open(root+"/etc/profile.env","w")
+ outfile.write(penvnotice)
+
+ for path in specials.keys():
+ if len(specials[path])==0:
+ continue
+ outstring="export "+path+"='"
+ if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
+ for x in specials[path][:-1]:
+ outstring += x+" "
+ else:
+ for x in specials[path][:-1]:
+ outstring=outstring+x+":"
+ outstring=outstring+specials[path][-1]+"'"
+ outfile.write(outstring+"\n")
+
+	#write the remaining (non-special) variables
+ for x in env.keys():
+ if type(env[x])!=types.StringType:
+ continue
+ outfile.write("export "+x+"='"+env[x]+"'\n")
+ outfile.close()
+
+ #create /etc/csh.env for (t)csh support
+ outfile=open(root+"/etc/csh.env","w")
+ outfile.write(cenvnotice)
+
+ for path in specials.keys():
+ if len(specials[path])==0:
+ continue
+ outstring="setenv "+path+" '"
+ if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
+ for x in specials[path][:-1]:
+ outstring += x+" "
+ else:
+ for x in specials[path][:-1]:
+ outstring=outstring+x+":"
+ outstring=outstring+specials[path][-1]+"'"
+ outfile.write(outstring+"\n")
+ #get it out of the way
+ del specials[path]
+
+	#write the remaining (non-special) variables
+ for x in env.keys():
+ if type(env[x])!=types.StringType:
+ continue
+ outfile.write("setenv "+x+" '"+env[x]+"'\n")
+ outfile.close()
+
+def new_protect_filename(mydest, newmd5=None):
+ """Resolves a config-protect filename for merging, optionally
+ using the last filename if the md5 matches.
+ (dest,md5) ==> 'string' --- path_to_target_filename
+ (dest) ==> ('next', 'highest') --- next_target and most-recent_target
+ """
+
+ # config protection filename format:
+ # ._cfg0000_foo
+ # 0123456789012
+ prot_num=-1
+ last_pfile=""
+
+ if (len(mydest) == 0):
+ raise ValueError, "Empty path provided where a filename is required"
+ if (mydest[-1]=="/"): # XXX add better directory checking
+ raise ValueError, "Directory provided but this function requires a filename"
+ if not os.path.exists(mydest):
+ return mydest
+
+ real_filename = os.path.basename(mydest)
+ real_dirname = os.path.dirname(mydest)
+ for pfile in listdir(real_dirname):
+ if pfile[0:5] != "._cfg":
+ continue
+ if pfile[10:] != real_filename:
+ continue
+ try:
+ new_prot_num = int(pfile[5:9])
+ if new_prot_num > prot_num:
+ prot_num = new_prot_num
+ last_pfile = pfile
+ except SystemExit, e:
+ raise
+ except:
+ continue
+ prot_num = prot_num + 1
+
+ new_pfile = os.path.normpath(real_dirname+"/._cfg"+string.zfill(prot_num,4)+"_"+real_filename)
+ old_pfile = os.path.normpath(real_dirname+"/"+last_pfile)
+ if last_pfile and newmd5:
+ if portage_checksum.perform_md5(real_dirname+"/"+last_pfile) == newmd5:
+ return old_pfile
+ else:
+ return new_pfile
+ elif newmd5:
+ return new_pfile
+ else:
+ return (new_pfile, old_pfile)
+
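+# Illustrative outcomes (hypothetical paths): merging over an existing
+# /etc/foo.conf with no prior protected copies produces
+# "._cfg0000_foo.conf"; the next differing merge gets "._cfg0001_foo.conf",
+# while a merge whose newmd5 matches the latest ._cfg file reuses it.
+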
+#XXX: These two are now implemented in portage_util.py but are needed here
+#XXX: until the isvalidatom() dependency is sorted out.
+
+def grabdict_package(myfilename,juststrings=0):
+ pkgs=grabdict(myfilename, juststrings=juststrings, empty=1)
+ for x in pkgs.keys():
+ if not isvalidatom(x):
+ del(pkgs[x])
+ writemsg("--- Invalid atom in %s: %s\n" % (myfilename, x))
+ return pkgs
+
+def grabfile_package(myfilename,compatlevel=0):
+ pkgs=grabfile(myfilename,compatlevel)
+ for x in range(len(pkgs)-1,-1,-1):
+ pkg = pkgs[x]
+ if pkg[0] == "-":
+ pkg = pkg[1:]
+ if pkg[0] == "*":
+ pkg = pkg[1:]
+ if not isvalidatom(pkg):
+ writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[x]))
+ del(pkgs[x])
+ return pkgs
+
+# returns a tuple. (version[string], error[string])
+# They are pretty much mutually exclusive.
+# Either version is a string and error is none, or
+# version is None and error is a string
+#
+def ExtractKernelVersion(base_dir):
+ lines = []
+ pathname = os.path.join(base_dir, 'Makefile')
+ try:
+ f = open(pathname, 'r')
+ except OSError, details:
+ return (None, str(details))
+ except IOError, details:
+ return (None, str(details))
+
+ try:
+ for i in range(4):
+ lines.append(f.readline())
+ except OSError, details:
+ return (None, str(details))
+ except IOError, details:
+ return (None, str(details))
+
+ lines = map(string.strip, lines)
+
+ version = ''
+
+ #XXX: The following code relies on the ordering of vars within the Makefile
+ for line in lines:
+ # split on the '=' then remove annoying whitespace
+ items = string.split(line, '=')
+ items = map(string.strip, items)
+ if items[0] == 'VERSION' or \
+ items[0] == 'PATCHLEVEL':
+ version += items[1]
+ version += "."
+ elif items[0] == 'SUBLEVEL':
+ version += items[1]
+ elif items[0] == 'EXTRAVERSION' and \
+ items[-1] != items[0]:
+ version += items[1]
+
+ # Grab a list of files named localversion* and sort them
+ localversions = os.listdir(base_dir)
+ for x in range(len(localversions)-1,-1,-1):
+ if localversions[x][:12] != "localversion":
+ del localversions[x]
+ localversions.sort()
+
+ # Append the contents of each to the version string, stripping ALL whitespace
+ for lv in localversions:
+ version += string.join(string.split(string.join(grabfile(base_dir+"/"+lv))), "")
+
+ # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
+ kernelconfig = getconfig(base_dir+"/.config")
+ if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
+ version += string.join(string.split(kernelconfig["CONFIG_LOCALVERSION"]), "")
+
+ return (version,None)
+
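+# Sketch (hypothetical Makefile values):
+#   VERSION = 2
+#   PATCHLEVEL = 6
+#   SUBLEVEL = 12
+#   EXTRAVERSION = -gentoo
+# yields ("2.6.12-gentoo", None), with any localversion* files and
+# CONFIG_LOCALVERSION from .config appended to the string.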
+
+autouse_val = None
+def autouse(myvartree,use_cache=1):
+ "returns set of USE variables auto-enabled due to packages being installed"
+ global usedefaults, autouse_val
+ if autouse_val is not None:
+ return autouse_val
+ if profiledir==None:
+ autouse_val = ""
+ return ""
+ myusevars=""
+ for myuse in usedefaults:
+ dep_met = True
+ for mydep in usedefaults[myuse]:
+ if not myvartree.dep_match(mydep,use_cache=True):
+ dep_met = False
+ break
+ if dep_met:
+ myusevars += " "+myuse
+ autouse_val = myusevars
+ return myusevars
+
+def check_config_instance(test):
+ if not test or (str(test.__class__) != 'portage.config'):
+ raise TypeError, "Invalid type for config object: %s" % test.__class__
+
+class config:
+ def __init__(self, clone=None, mycpv=None, config_profile_path=None, config_incrementals=None):
+
+ self.already_in_regenerate = 0
+
+ self.locked = 0
+ self.mycpv = None
+ self.puse = []
+ self.modifiedkeys = []
+
+ self.virtuals = {}
+ self.v_count = 0
+
+ # Virtuals obtained from the vartree
+ self.treeVirtuals = {}
+ # Virtuals by user specification. Includes negatives.
+ self.userVirtuals = {}
+ # Virtual negatives from user specifications.
+ self.negVirtuals = {}
+
+ self.user_profile_dir = None
+
+ if clone:
+ self.incrementals = copy.deepcopy(clone.incrementals)
+ self.profile_path = copy.deepcopy(clone.profile_path)
+ self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
+
+ self.module_priority = copy.deepcopy(clone.module_priority)
+ self.modules = copy.deepcopy(clone.modules)
+
+ self.depcachedir = copy.deepcopy(clone.depcachedir)
+
+ self.packages = copy.deepcopy(clone.packages)
+ self.virtuals = copy.deepcopy(clone.virtuals)
+
+ self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
+ self.userVirtuals = copy.deepcopy(clone.userVirtuals)
+ self.negVirtuals = copy.deepcopy(clone.negVirtuals)
+
+ self.use_defs = copy.deepcopy(clone.use_defs)
+ self.usemask = copy.deepcopy(clone.usemask)
+
+ self.configlist = copy.deepcopy(clone.configlist)
+ self.configlist[-1] = os.environ.copy()
+ self.configdict = { "globals": self.configlist[0],
+ "defaults": self.configlist[1],
+ "conf": self.configlist[2],
+ "pkg": self.configlist[3],
+ "auto": self.configlist[4],
+ "backupenv": self.configlist[5],
+ "env": self.configlist[6] }
+ self.profiles = copy.deepcopy(clone.profiles)
+ self.backupenv = copy.deepcopy(clone.backupenv)
+ self.pusedict = copy.deepcopy(clone.pusedict)
+ self.categories = copy.deepcopy(clone.categories)
+ self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
+ self.pmaskdict = copy.deepcopy(clone.pmaskdict)
+ self.punmaskdict = copy.deepcopy(clone.punmaskdict)
+ self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
+ self.pprovideddict = copy.deepcopy(clone.pprovideddict)
+ self.lookuplist = copy.deepcopy(clone.lookuplist)
+ self.uvlist = copy.deepcopy(clone.uvlist)
+ self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
+ else:
+ self.depcachedir = DEPCACHE_PATH
+
+ if not config_profile_path:
+ global profiledir
+ writemsg("config_profile_path not specified to class config\n")
+ self.profile_path = profiledir[:]
+ else:
+ self.profile_path = config_profile_path[:]
+
+ if not config_incrementals:
+ writemsg("incrementals not specified to class config\n")
+ self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
+ else:
+ self.incrementals = copy.deepcopy(config_incrementals)
+
+ self.module_priority = ["user","default"]
+ self.modules = {}
+ self.modules["user"] = getconfig(MODULES_FILE_PATH)
+ if self.modules["user"] == None:
+ self.modules["user"] = {}
+ self.modules["default"] = {
+ "portdbapi.metadbmodule": "portage_db_flat.database",
+ "portdbapi.auxdbmodule": "portage_db_flat.database",
+ "eclass_cache.dbmodule": "portage_db_cpickle.database",
+ }
+
+ self.usemask=[]
+ self.configlist=[]
+ self.backupenv={}
+ # back up our incremental variables:
+ self.configdict={}
+ # configlist will contain: [ globals, defaults, conf, pkg, auto, backupenv (incrementals), origenv ]
+
+ # The symlink might not exist or might not be a symlink.
+ try:
+ self.profiles=[abssymlink(self.profile_path)]
+ except SystemExit, e:
+ raise
+ except:
+ self.profiles=[self.profile_path]
+
+ mypath = self.profiles[0]
+ while os.path.exists(mypath+"/parent"):
+ mypath = os.path.normpath(mypath+"///"+grabfile(mypath+"/parent")[0])
+ if os.path.exists(mypath):
+ self.profiles.insert(0,mypath)
+
+ if os.environ.has_key("PORTAGE_CALLER") and os.environ["PORTAGE_CALLER"] == "repoman":
+ pass
+ else:
+ # XXX: This should depend on ROOT?
+ if os.path.exists("/"+CUSTOM_PROFILE_PATH):
+ self.user_profile_dir = os.path.normpath("/"+"///"+CUSTOM_PROFILE_PATH)
+ self.profiles.append(self.user_profile_dir[:])
+
+ self.packages_list = grab_multiple("packages", self.profiles, grabfile_package)
+ self.packages = stack_lists(self.packages_list, incremental=1)
+ del self.packages_list
+ #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
+
+ # revmaskdict
+ self.prevmaskdict={}
+ for x in self.packages:
+ mycatpkg=dep_getkey(x)
+ if not self.prevmaskdict.has_key(mycatpkg):
+ self.prevmaskdict[mycatpkg]=[x]
+ else:
+ self.prevmaskdict[mycatpkg].append(x)
+
+ # get profile-masked use flags -- INCREMENTAL Child over parent
+ usemask_lists = grab_multiple("use.mask", self.profiles, grabfile)
+ self.usemask = stack_lists(usemask_lists, incremental=True)
+ del usemask_lists
+ use_defs_lists = grab_multiple("use.defaults", self.profiles, grabdict)
+ self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
+ del use_defs_lists
+
+ try:
+ mygcfg_dlists = grab_multiple("make.globals", self.profiles+["/etc"], getconfig)
+ self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
+
+ if self.mygcfg == None:
+ self.mygcfg = {}
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! %s\n" % (e))
+ writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
+ writemsg("!!! Errors in this file should be reported on bugs.gentoo.org.\n")
+ sys.exit(1)
+ self.configlist.append(self.mygcfg)
+ self.configdict["globals"]=self.configlist[-1]
+
+ self.mygcfg = {}
+ if self.profiles:
+ try:
+ mygcfg_dlists = grab_multiple("make.defaults", self.profiles, getconfig)
+ self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
+ #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
+ if self.mygcfg == None:
+ self.mygcfg = {}
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! %s\n" % (e))
+ writemsg("!!! 'rm -Rf /usr/portage/profiles; emerge sync' may fix this. If it does\n")
+ writemsg("!!! not then please report this to bugs.gentoo.org and, if possible, a dev\n")
+ writemsg("!!! on #gentoo (irc.freenode.org)\n")
+ sys.exit(1)
+ self.configlist.append(self.mygcfg)
+ self.configdict["defaults"]=self.configlist[-1]
+
+ try:
+ # XXX: Should depend on root?
+ self.mygcfg=getconfig("/"+MAKE_CONF_FILE,allow_sourcing=True)
+ if self.mygcfg == None:
+ self.mygcfg = {}
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! %s\n" % (e))
+ writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
+ sys.exit(1)
+
+
+ self.configlist.append(self.mygcfg)
+ self.configdict["conf"]=self.configlist[-1]
+
+ self.configlist.append({})
+ self.configdict["pkg"]=self.configlist[-1]
+
+ #auto-use:
+ self.configlist.append({})
+ self.configdict["auto"]=self.configlist[-1]
+
+ #backup-env (for recording our calculated incremental variables:)
+ self.backupenv = os.environ.copy()
+ self.configlist.append(self.backupenv) # XXX Why though?
+ self.configdict["backupenv"]=self.configlist[-1]
+
+ self.configlist.append(os.environ.copy())
+ self.configdict["env"]=self.configlist[-1]
+
+
+ # make lookuplist for loading package.*
+ self.lookuplist=self.configlist[:]
+ self.lookuplist.reverse()
+
+ archlist = grabfile(self["PORTDIR"]+"/profiles/arch.list")
+ self.configdict["conf"]["PORTAGE_ARCHLIST"] = string.join(archlist)
+
+ if os.environ.get("PORTAGE_CALLER","") == "repoman":
+ # repoman shouldn't use local settings.
+ locations = [self["PORTDIR"] + "/profiles"]
+ self.pusedict = {}
+ self.pkeywordsdict = {}
+ self.punmaskdict = {}
+ else:
+ locations = [self["PORTDIR"] + "/profiles", USER_CONFIG_PATH]
+ for ov in self["PORTDIR_OVERLAY"].split():
+ ov = os.path.normpath(ov)
+ if os.path.isdir(ov+"/profiles"):
+ locations.append(ov+"/profiles")
+
+ pusedict=grabdict_package(USER_CONFIG_PATH+"/package.use")
+ self.pusedict = {}
+ for key in pusedict.keys():
+ cp = dep_getkey(key)
+ if not self.pusedict.has_key(cp):
+ self.pusedict[cp] = {}
+ self.pusedict[cp][key] = pusedict[key]
+
+ #package.keywords
+ pkgdict=grabdict_package(USER_CONFIG_PATH+"/package.keywords")
+ self.pkeywordsdict = {}
+ for key in pkgdict.keys():
+ # default to ~arch if no specific keyword is given
+ if not pkgdict[key]:
+ mykeywordlist = []
+ if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
+ groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
+ else:
+ groups = []
+ for keyword in groups:
+ if not keyword[0] in "~-":
+ mykeywordlist.append("~"+keyword)
+ pkgdict[key] = mykeywordlist
+ cp = dep_getkey(key)
+ if not self.pkeywordsdict.has_key(cp):
+ self.pkeywordsdict[cp] = {}
+ self.pkeywordsdict[cp][key] = pkgdict[key]
+
+ #package.unmask
+ pkgunmasklines = grabfile_package(USER_CONFIG_PATH+"/package.unmask")
+ self.punmaskdict = {}
+ for x in pkgunmasklines:
+ mycatpkg=dep_getkey(x)
+ if self.punmaskdict.has_key(mycatpkg):
+ self.punmaskdict[mycatpkg].append(x)
+ else:
+ self.punmaskdict[mycatpkg]=[x]
+
+ #getting categories from an external file now
+ categories = grab_multiple("categories", locations, grabfile)
+ self.categories = stack_lists(categories, incremental=1)
+ del categories
+
+ # get virtuals -- needs categories
+ self.loadVirtuals('/')
+
+ #package.mask
+ pkgmasklines = grab_multiple("package.mask", self.profiles + locations, grabfile_package)
+ pkgmasklines = stack_lists(pkgmasklines, incremental=1)
+
+ self.pmaskdict = {}
+ for x in pkgmasklines:
+ mycatpkg=dep_getkey(x)
+ if self.pmaskdict.has_key(mycatpkg):
+ self.pmaskdict[mycatpkg].append(x)
+ else:
+ self.pmaskdict[mycatpkg]=[x]
+
+ pkgprovidedlines = grab_multiple("package.provided", self.profiles, grabfile)
+ pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
+ for x in range(len(pkgprovidedlines)-1, -1, -1):
+ cpvr = catpkgsplit(pkgprovidedlines[x])
+ if not cpvr or cpvr[0] == "null":
+ writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n")
+ del pkgprovidedlines[x]
+
+ self.pprovideddict = {}
+ for x in pkgprovidedlines:
+ if not x:
+ continue
+ mycatpkg=dep_getkey(x)
+ if self.pprovideddict.has_key(mycatpkg):
+ self.pprovideddict[mycatpkg].append(x)
+ else:
+ self.pprovideddict[mycatpkg]=[x]
+
+ self.lookuplist=self.configlist[:]
+ self.lookuplist.reverse()
+
+ useorder=self["USE_ORDER"]
+ if not useorder:
+ # reasonable defaults; this is important as without USE_ORDER,
+ # USE will always be "" (nothing set)!
+ useorder="env:pkg:conf:auto:defaults"
+ useordersplit=useorder.split(":")
+
+ self.uvlist=[]
+ for x in useordersplit:
+ if self.configdict.has_key(x):
+ if "PKGUSE" in self.configdict[x].keys():
+					del self.configdict[x]["PKGUSE"] # PKGUSE is not legal to set here; drop it.
+ #prepend db to list to get correct order
+ self.uvlist[0:0]=[self.configdict[x]]
+
+ self.configdict["env"]["PORTAGE_GID"]=str(portage_gid)
+ self.backupenv["PORTAGE_GID"]=str(portage_gid)
+
+ if self.has_key("PORT_LOGDIR") and not self["PORT_LOGDIR"]:
+ # port_logdir is defined, but empty. this causes a traceback in doebuild.
+ writemsg(yellow("!!!")+" PORT_LOGDIR was defined, but set to nothing.\n")
+			writemsg(yellow("!!!")+" Disabling it. Please set it to a nonempty value.\n")
+ del self["PORT_LOGDIR"]
+
+ if self["PORTAGE_CACHEDIR"]:
+ # XXX: Deprecated -- April 15 -- NJ
+ writemsg(yellow(">>> PORTAGE_CACHEDIR has been deprecated!")+"\n")
+ writemsg(">>> Please use PORTAGE_DEPCACHEDIR instead.\n")
+ self.depcachedir = self["PORTAGE_CACHEDIR"]
+ del self["PORTAGE_CACHEDIR"]
+
+ if self["PORTAGE_DEPCACHEDIR"]:
+ #the auxcache is the only /var/cache/edb/ entry that stays at / even when "root" changes.
+ # XXX: Could move with a CHROOT functionality addition.
+ self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
+ del self["PORTAGE_DEPCACHEDIR"]
+
+ overlays = string.split(self["PORTDIR_OVERLAY"])
+ if overlays:
+ new_ov=[]
+ for ov in overlays:
+ ov=os.path.normpath(ov)
+ if os.path.isdir(ov):
+ new_ov.append(ov)
+ else:
+ writemsg(red("!!! Invalid PORTDIR_OVERLAY (not a dir): "+ov+"\n"))
+ self["PORTDIR_OVERLAY"] = string.join(new_ov)
+ self.backup_changes("PORTDIR_OVERLAY")
+
+ self.regenerate()
+
+ self.features = portage_util.unique_array(self["FEATURES"].split())
+
+ #XXX: Should this be temporary? Is it possible at all to have a default?
+ if "gpg" in self.features:
+ if not os.path.exists(self["PORTAGE_GPG_DIR"]) or not os.path.isdir(self["PORTAGE_GPG_DIR"]):
+ writemsg("PORTAGE_GPG_DIR is invalid. Removing gpg from FEATURES.\n")
+ self.features.remove("gpg")
+
+ if "maketest" in self.features and "test" not in self.features:
+ self.features.append("test")
+
+ if not portage_exec.sandbox_capable and ("sandbox" in self.features or "usersandbox" in self.features):
+ writemsg(red("!!! Problem with sandbox binary. Disabling...\n\n"))
+ if "sandbox" in self.features:
+ self.features.remove("sandbox")
+ if "usersandbox" in self.features:
+ self.features.remove("usersandbox")
+
+ self.features.sort()
+ self["FEATURES"] = " ".join(["-*"]+self.features)
+ self.backup_changes("FEATURES")
+
+ if not len(self["CBUILD"]) and len(self["CHOST"]):
+ self["CBUILD"] = self["CHOST"]
+ self.backup_changes("CBUILD")
+
+ if mycpv:
+ self.setcpv(mycpv)
+
+ def loadVirtuals(self,root):
+ self.virtuals = self.getvirtuals(root)
+
+ def load_best_module(self,property_string):
+ best_mod = best_from_dict(property_string,self.modules,self.module_priority)
+ return load_mod(best_mod)
+
+ def lock(self):
+ self.locked = 1
+
+ def unlock(self):
+ self.locked = 0
+
+ def modifying(self):
+ if self.locked:
+ raise Exception, "Configuration is locked."
+
+ def backup_changes(self,key=None):
+ if key and self.configdict["env"].has_key(key):
+ self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
+ else:
+ raise KeyError, "No such key defined in environment: %s" % key
+
+ def reset(self,keeping_pkg=0,use_cache=1):
+ "reset environment to original settings"
+ for x in self.configlist[-1].keys():
+ if x not in self.backupenv.keys():
+ del self.configlist[-1][x]
+
+ self.configdict["env"].update(self.backupenv)
+
+ self.modifiedkeys = []
+ if not keeping_pkg:
+ self.puse = ""
+ self.configdict["pkg"].clear()
+ self.regenerate(use_cache=use_cache)
+
+ def load_infodir(self,infodir):
+ if self.configdict.has_key("pkg"):
+ for x in self.configdict["pkg"].keys():
+ del self.configdict["pkg"][x]
+ else:
+ writemsg("No pkg setup for settings instance?\n")
+ sys.exit(17)
+
+ if os.path.exists(infodir):
+ if os.path.exists(infodir+"/environment"):
+ self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
+
+ myre = re.compile('^[A-Z]+$')
+ for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
+ if myre.match(filename):
+ try:
+ mydata = string.strip(open(infodir+"/"+filename).read())
+ if len(mydata)<2048:
+ if filename == "USE":
+ self.configdict["pkg"][filename] = "-* "+mydata
+ else:
+ self.configdict["pkg"][filename] = mydata
+ except SystemExit, e:
+ raise
+ except:
+					writemsg("!!! Unable to read file: %s\n" % (infodir+"/"+filename))
+ return 1
+ return 0
+
+ def setcpv(self,mycpv,use_cache=1):
+ self.modifying()
+ self.mycpv = mycpv
+ cp = dep_getkey(mycpv)
+ newpuse = ""
+ if self.pusedict.has_key(cp):
+ self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys())
+ if self.pusekey:
+ newpuse = string.join(self.pusedict[cp][self.pusekey])
+ if newpuse == self.puse:
+ return
+ self.puse = newpuse
+ self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
+ self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
+ self.reset(keeping_pkg=1,use_cache=use_cache)
+
+ def setinst(self,mycpv,mydbapi):
+ # Grab the virtuals this package provides and add them into the tree virtuals.
+ provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
+ if isinstance(mydbapi, portdbapi):
+ myuse = self["USE"]
+ else:
+ myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
+ virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
+
+ cp = dep_getkey(mycpv)
+ for virt in virts:
+ virt = dep_getkey(virt)
+ if not self.treeVirtuals.has_key(virt):
+ self.treeVirtuals[virt] = []
+ # XXX: Is this bad? -- It's a permanent modification
+ self.treeVirtuals[virt] = portage_util.unique_array(self.treeVirtuals[virt]+[cp])
+
+ self.virtuals = self.__getvirtuals_compile()
+
+
+ def regenerate(self,useonly=0,use_cache=1):
+ global usesplit,profiledir
+
+ if self.already_in_regenerate:
+ # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
+ writemsg("!!! Looping in regenerate.\n",1)
+ return
+ else:
+ self.already_in_regenerate = 1
+
+ if useonly:
+ myincrementals=["USE"]
+ else:
+ myincrementals=portage_const.INCREMENTALS
+ for mykey in myincrementals:
+ if mykey=="USE":
+ mydbs=self.uvlist
+ # XXX Global usage of db... Needs to go away somehow.
+ if db.has_key(root) and db[root].has_key("vartree"):
+ self.configdict["auto"]["USE"]=autouse(db[root]["vartree"],use_cache=use_cache)
+ else:
+ self.configdict["auto"]["USE"]=""
+ else:
+ mydbs=self.configlist[:-1]
+
+ myflags=[]
+ for curdb in mydbs:
+ if not curdb.has_key(mykey):
+ continue
+ #variables are already expanded
+ mysplit=curdb[mykey].split()
+
+ for x in mysplit:
+ if x=="-*":
+ # "-*" is a special "minus" var that means "unset all settings".
+ # so USE="-* gnome" will have *just* gnome enabled.
+ myflags=[]
+ continue
+
+ if x[0]=="+":
+ # Not legal. People assume too much. Complain.
+ writemsg(red("USE flags should not start with a '+': %s\n" % x))
+ x=x[1:]
+
+ if (x[0]=="-"):
+ if (x[1:] in myflags):
+ # Unset/Remove it.
+ del myflags[myflags.index(x[1:])]
+ continue
+
+ # We got here, so add it now.
+ if x not in myflags:
+ myflags.append(x)
+
+ myflags.sort()
+ #store setting in last element of configlist, the original environment:
+ self.configlist[-1][mykey]=string.join(myflags," ")
+ del myflags
+
+ #cache split-up USE var in a global
+ usesplit=[]
+
+ for x in string.split(self.configlist[-1]["USE"]):
+ if x not in self.usemask:
+ usesplit.append(x)
+
+ if self.has_key("USE_EXPAND"):
+ for var in string.split(self["USE_EXPAND"]):
+ if self.has_key(var):
+ for x in string.split(self[var]):
+ mystr = string.lower(var)+"_"+x
+ if mystr not in usesplit:
+ usesplit.append(mystr)
+
+ # Pre-Pend ARCH variable to USE settings so '-*' in env doesn't kill arch.
+ if self.configdict["defaults"].has_key("ARCH"):
+ if self.configdict["defaults"]["ARCH"]:
+ if self.configdict["defaults"]["ARCH"] not in usesplit:
+ usesplit.insert(0,self.configdict["defaults"]["ARCH"])
+
+ self.configlist[-1]["USE"]=string.join(usesplit," ")
+
+ self.already_in_regenerate = 0
+
+ def getvirtuals(self, myroot):
+ myvirts = {}
+
+ # This breaks catalyst/portage when setting to a fresh/empty root.
+ # Virtuals cannot be calculated because there is nothing to work
+ # from. So the only ROOT prefixed dir should be local configs.
+ #myvirtdirs = prefix_array(self.profiles,myroot+"/")
+ myvirtdirs = copy.deepcopy(self.profiles)
+ while self.user_profile_dir in myvirtdirs:
+ myvirtdirs.remove(self.user_profile_dir)
+
+
+ # Rules
+ # R1: Collapse profile virtuals
+ # R2: Extract user-negatives.
+ # R3: Collapse user-virtuals.
+ # R4: Apply user negatives to all except user settings.
+
+ # Order of preference:
+ # 1. user-declared that are installed
+ # 3. installed and in profile
+ # 4. installed
+ # 2. user-declared set
+ # 5. profile
+
+ self.dirVirtuals = grab_multiple("virtuals", myvirtdirs, grabdict)
+ self.dirVirtuals.reverse()
+
+ if self.user_profile_dir and os.path.exists(self.user_profile_dir+"/virtuals"):
+ self.userVirtuals = grabdict(self.user_profile_dir+"/virtuals")
+
+ # Store all the negatives for later.
+ for x in self.userVirtuals.keys():
+ self.negVirtuals[x] = []
+ for y in self.userVirtuals[x]:
+ if y[0] == '-':
+ self.negVirtuals[x].append(y[:])
+
+ # Collapse the user virtuals so that we don't deal with negatives.
+ self.userVirtuals = stack_dictlist([self.userVirtuals],incremental=1)
+
+ # Collapse all the profile virtuals including user negations.
+ self.dirVirtuals = stack_dictlist([self.negVirtuals]+self.dirVirtuals,incremental=1)
+
+ # Repoman does not use user or tree virtuals.
+ if os.environ.get("PORTAGE_CALLER","") != "repoman":
+ # XXX: vartree does not use virtuals, does user set matter?
+ temp_vartree = vartree(myroot,self.dirVirtuals,categories=self.categories)
+ # Reduce the provides into a list by CP.
+ self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
+
+ return self.__getvirtuals_compile()
+
+ def __getvirtuals_compile(self):
+ """Actually generate the virtuals we have collected.
+ The results are reversed so the list order is left to right.
+ Given data is [Best,Better,Good] sets of [Good, Better, Best]"""
+
+ # Virtuals by profile+tree preferences.
+ ptVirtuals = {}
+ # Virtuals by user+tree preferences.
+ utVirtuals = {}
+
+ # If a user virtual is already installed, we preference it.
+ for x in self.userVirtuals.keys():
+ utVirtuals[x] = []
+ if self.treeVirtuals.has_key(x):
+ for y in self.userVirtuals[x]:
+ if y in self.treeVirtuals[x]:
+ utVirtuals[x].append(y)
+ #print "F:",utVirtuals
+ #utVirtuals[x].reverse()
+ #print "R:",utVirtuals
+
+ # If a profile virtual is already installed, we preference it.
+ for x in self.dirVirtuals.keys():
+ ptVirtuals[x] = []
+ if self.treeVirtuals.has_key(x):
+ for y in self.dirVirtuals[x]:
+ if y in self.treeVirtuals[x]:
+ ptVirtuals[x].append(y)
+
+ # UserInstalled, ProfileInstalled, Installed, User, Profile
+ biglist = [utVirtuals, ptVirtuals, self.treeVirtuals,
+ self.userVirtuals, self.dirVirtuals]
+
+ # We reverse each dictlist so that the order matches everything
+ # else in portage. [-*, a, b] [b, c, d] ==> [b, a]
+ for dictlist in biglist:
+ for key in dictlist:
+ dictlist[key].reverse()
+
+ # User settings and profile settings take precedence over tree.
+ val = stack_dictlist(biglist,incremental=1)
+
+ return val
+
+ def __delitem__(self,mykey):
+ for x in self.lookuplist:
+ if x != None:
+ if mykey in x:
+ del x[mykey]
+
+ def __getitem__(self,mykey):
+ match = ''
+ for x in self.lookuplist:
+ if x == None:
+ writemsg("!!! lookuplist is null.\n")
+ elif x.has_key(mykey):
+ match = x[mykey]
+ break
+
+ if 0 and match and mykey in ["PORTAGE_BINHOST"]:
+ # These require HTTP Encoding
+ try:
+ import urllib
+ if urllib.unquote(match) != match:
+ writemsg("Note: %s already contains escape codes." % (mykey))
+ else:
+ match = urllib.quote(match)
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("Failed to fix %s using urllib, attempting to continue.\n" % (mykey))
+ pass
+
+ elif mykey == "CONFIG_PROTECT_MASK":
+ match += " /etc/env.d"
+
+ return match
+
+ def has_key(self,mykey):
+ for x in self.lookuplist:
+ if x.has_key(mykey):
+ return 1
+ return 0
+
+ def keys(self):
+ mykeys=[]
+ for x in self.lookuplist:
+ for y in x.keys():
+ if y not in mykeys:
+ mykeys.append(y)
+ return mykeys
+
+ def __setitem__(self,mykey,myvalue):
+ "set a value; will be thrown away at reset() time"
+ if type(myvalue) != types.StringType:
+ raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
+ self.modifying()
+ self.modifiedkeys += [mykey]
+ self.configdict["env"][mykey]=myvalue
+
+ def environ(self):
+ "return our locally-maintained environment"
+ mydict={}
+ for x in self.keys():
+ mydict[x]=self[x]
+ if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
+ writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
+ mydict["HOME"]=mydict["BUILD_PREFIX"][:]
+
+ return mydict
+
+
+# XXX This would be to replace getstatusoutput completely.
+# XXX Issue: cannot block execution. Deadlock condition.
+def spawn(mystring,mysettings,debug=0,free=0,droppriv=0,fd_pipes=None,**keywords):
+ """spawn a subprocess with optional sandbox protection,
+ depending on whether sandbox is enabled. The "free" argument,
+ when set to 1, will disable sandboxing. This allows us to
+ spawn processes that are supposed to modify files outside of the
+ sandbox. We can't use os.system anymore because it messes up
+ signal handling. Using spawn allows our Portage signal handler
+ to work."""
+
+ if type(mysettings) == types.DictType:
+ env=mysettings
+ keywords["opt_name"]="[ %s ]" % "portage"
+ else:
+ check_config_instance(mysettings)
+ env=mysettings.environ()
+ keywords["opt_name"]="[%s]" % mysettings["PF"]
+
+ # XXX: Negative RESTRICT word
+ droppriv=(droppriv and ("userpriv" in features) and not \
+ (("nouserpriv" in string.split(mysettings["RESTRICT"])) or \
+ ("userpriv" in string.split(mysettings["RESTRICT"]))))
+
+ if droppriv and not uid and portage_gid and portage_uid:
+ keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":[portage_gid],"umask":002})
+
+ if not free:
+ free=((droppriv and "usersandbox" not in features) or \
+ (not droppriv and "sandbox" not in features and "usersandbox" not in features))
+
+ if not free:
+ keywords["opt_name"] += " sandbox"
+ return portage_exec.spawn_sandbox(mystring,env=env,**keywords)
+ else:
+ keywords["opt_name"] += " bash"
+ return portage_exec.spawn_bash(mystring,env=env,**keywords)
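+# For example, doebuild() below drives ebuild.sh phases through this
+# wrapper:
+#
+#   retval = spawn(EBUILD_SH_BINARY+" depend", mysettings)
+#   retval = spawn(EBUILD_SH_BINARY+" clean", mysettings, free=1, logfile=logfile)
+#
+# free=1 skips the sandbox for phases that must touch the live system.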
+
+
+
+def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
+ "fetch files. Will use digest file if available."
+
+ # 'nomirror' is bad/negative logic: you RESTRICT mirroring, you don't "no-mirror".
+ if ("mirror" in mysettings["RESTRICT"].split()) or \
+ ("nomirror" in mysettings["RESTRICT"].split()):
+ if ("mirror" in features) and ("lmirror" not in features):
+ # lmirror should allow you to bypass mirror restrictions.
+ # XXX: This is not a good thing, and is temporary at best.
+ print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
+ return 1
+
+ global thirdpartymirrors
+
+ check_config_instance(mysettings)
+
+ custommirrors=grabdict(CUSTOM_MIRRORS_FILE)
+
+ mymirrors=[]
+
+ if listonly or ("distlocks" not in features):
+ use_locks = 0
+
+ fetch_to_ro = 0
+ if "skiprocheck" in features:
+ fetch_to_ro = 1
+
+ if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
+ if use_locks:
+ writemsg(red("!!! You are fetching to a read-only filesystem, you should turn locking off"));
+ writemsg("!!! This can be done by adding -distlocks to FEATURES in /etc/make.conf");
+# use_locks = 0
+
+ # local mirrors are always added
+ if custommirrors.has_key("local"):
+ mymirrors += custommirrors["local"]
+
+ if ("nomirror" in mysettings["RESTRICT"].split()) or \
+ ("mirror" in mysettings["RESTRICT"].split()):
+ # We don't add any mirrors.
+ pass
+ else:
+ if try_mirrors:
+ for x in mysettings["GENTOO_MIRRORS"].split():
+ if x:
+ if x[-1] == '/':
+ mymirrors += [x[:-1]]
+ else:
+ mymirrors += [x]
+
+ mydigests = {}
+ digestfn = mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
+ if os.path.exists(digestfn):
+ mydigests = digestParseFile(digestfn)
+
+ fsmirrors = []
+ for x in range(len(mymirrors)-1,-1,-1):
+ if mymirrors[x] and mymirrors[x][0]=='/':
+ fsmirrors += [mymirrors[x]]
+ del mymirrors[x]
+
+ for myuri in myuris:
+ myfile=os.path.basename(myuri)
+ try:
+ destdir = mysettings["DISTDIR"]+"/"
+ if not os.path.exists(destdir+myfile):
+ for mydir in fsmirrors:
+ if os.path.exists(mydir+"/"+myfile):
+ writemsg(_("Local mirror has file: %(file)s\n" % {"file":myfile}))
+ shutil.copyfile(mydir+"/"+myfile,destdir+"/"+myfile)
+ break
+ except (OSError,IOError),e:
+ # file does not exist
+ writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
+ gotit=0
+
+ if "fetch" in mysettings["RESTRICT"].split():
+ # fetch is restricted. Ensure all files have already been downloaded; otherwise,
+ # print message and exit.
+ gotit=1
+ for myuri in myuris:
+ myfile=os.path.basename(myuri)
+ try:
+ mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
+ except (OSError,IOError),e:
+ # file does not exist
+ writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
+ gotit=0
+ if not gotit:
+ print
+ print "!!!",mysettings["CATEGORY"]+"/"+mysettings["PF"],"has fetch restriction turned on."
+ print "!!! This probably means that this ebuild's files must be downloaded"
+ print "!!! manually. See the comments in the ebuild for more information."
+ print
+ spawn(EBUILD_SH_BINARY+" nofetch",mysettings)
+ return 0
+ return 1
+ locations=mymirrors[:]
+ filedict={}
+ primaryuri_indexes={}
+ for myuri in myuris:
+ myfile=os.path.basename(myuri)
+ if not filedict.has_key(myfile):
+ filedict[myfile]=[]
+ for y in range(0,len(locations)):
+ filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
+ if myuri[:9]=="mirror://":
+ eidx = myuri.find("/", 9)
+ if eidx != -1:
+ mirrorname = myuri[9:eidx]
+
+ # Try user-defined mirrors first
+ if custommirrors.has_key(mirrorname):
+ for cmirr in custommirrors[mirrorname]:
+ filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
+ # remove the mirrors we tried from the list of official mirrors
+ if cmirr.strip() in thirdpartymirrors[mirrorname]:
+ thirdpartymirrors[mirrorname].remove(cmirr)
+ # now try the official mirrors
+ if thirdpartymirrors.has_key(mirrorname):
+ try:
+ shuffle(thirdpartymirrors[mirrorname])
+ except SystemExit, e:
+ raise
+ except:
+ writemsg(red("!!! YOU HAVE A BROKEN PYTHON/GLIBC.\n"))
+ writemsg( "!!! You are most likely on a pentium4 box and have specified -march=pentium4\n")
+ writemsg( "!!! or -fpmath=sse2. GCC was generating invalid sse2 instructions in versions\n")
+ writemsg( "!!! prior to 3.2.3. Please merge the latest gcc or rebuid python with either\n")
+ writemsg( "!!! -march=pentium3 or set -mno-sse2 in your cflags.\n\n\n")
+ time.sleep(10)
+
+ for locmirr in thirdpartymirrors[mirrorname]:
+ filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
+
+ if not filedict[myfile]:
+ writemsg("No known mirror by the name: %s\n" % (mirrorname))
+ else:
+ writemsg("Invalid mirror definition in SRC_URI:\n")
+ writemsg(" %s\n" % (myuri))
+ else:
+ if "primaryuri" in mysettings["RESTRICT"].split():
+ # Use the source site first.
+ if primaryuri_indexes.has_key(myfile):
+ primaryuri_indexes[myfile] += 1
+ else:
+ primaryuri_indexes[myfile] = 0
+ filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
+ else:
+ filedict[myfile].append(myuri)
+
+ missingSourceHost = False
+ for myfile in filedict.keys(): # Gives a list, not just the first one
+ if not filedict[myfile]:
+ writemsg("Warning: No mirrors available for file '%s'\n" % (myfile))
+ missingSourceHost = True
+ if missingSourceHost:
+ return 0
+ del missingSourceHost
+
+ can_fetch=True
+ if not os.access(mysettings["DISTDIR"]+"/",os.W_OK):
+ if not fetch_to_ro:
+ print "!!! No write access to %s" % mysettings["DISTDIR"]+"/"
+ can_fetch=False
+ else:
+ mystat=os.stat(mysettings["DISTDIR"]+"/")
+ if mystat.st_gid != portage_gid:
+ try:
+ os.chown(mysettings["DISTDIR"],-1,portage_gid)
+ except OSError, oe:
+ if oe.errno == 1:
+ print red("!!!")+" Unable to chgrp of %s to portage, continuing\n" % mysettings["DISTDIR"]
+ else:
+ raise oe
+
+ # Writable by portage_gid? This code path is root-specific, so adjust perms automatically if needed.
+ if not stat.S_IMODE(mystat.st_mode) & 020:
+ try:
+ os.chmod(mysettings["DISTDIR"],stat.S_IMODE(mystat.st_mode) | 020)
+ except OSError, oe:
+ if oe.errno == 1:
+ print red("!!!")+" Unable to chmod %s to perms 0755. Non-root users will experience issues.\n" % mysettings["DISTDIR"]
+ else:
+ raise oe
+
+ if use_locks and locks_in_subdir:
+ if os.path.exists(mysettings["DISTDIR"]+"/"+locks_in_subdir):
+ if not os.access(mysettings["DISTDIR"]+"/"+locks_in_subdir,os.W_OK):
+ writemsg("!!! No write access to write to %s. Aborting.\n" % mysettings["DISTDIR"]+"/"+locks_in_subdir)
+ return 0
+ else:
+ old_umask=os.umask(0002)
+ os.mkdir(mysettings["DISTDIR"]+"/"+locks_in_subdir,0775)
+ if os.stat(mysettings["DISTDIR"]+"/"+locks_in_subdir).st_gid != portage_gid:
+ try:
+ os.chown(mysettings["DISTDIR"]+"/"+locks_in_subdir,-1,portage_gid)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ os.umask(old_umask)
+
+
+ for myfile in filedict.keys():
+ fetched=0
+ file_lock = None
+ if listonly:
+ writemsg("\n")
+ else:
+ if use_locks and can_fetch:
+ if locks_in_subdir:
+ file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1)
+ else:
+ file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1)
+ try:
+ for loc in filedict[myfile]:
+ if listonly:
+ writemsg(loc+" ")
+ continue
+ # allow different fetchcommands per protocol
+ protocol = loc[0:loc.find("://")]
+ if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
+ fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
+ else:
+ fetchcommand=mysettings["FETCHCOMMAND"]
+ if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
+ resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
+ else:
+ resumecommand=mysettings["RESUMECOMMAND"]
+
+ fetchcommand=string.replace(fetchcommand,"${DISTDIR}",mysettings["DISTDIR"])
+ resumecommand=string.replace(resumecommand,"${DISTDIR}",mysettings["DISTDIR"])
+
+ try:
+ mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
+ if mydigests.has_key(myfile):
+ #if we have the digest file, we know the final size and can resume the download.
+ if mystat[stat.ST_SIZE]<mydigests[myfile]["size"]:
+ fetched=1
+ else:
+ #we already have it downloaded, skip.
+ #if our file is bigger than the recorded size, digestcheck should catch it.
+ if not fetchonly:
+ fetched=2
+ else:
+ # Check md5sum's at each fetch for fetchonly.
+ verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
+ if not verified_ok:
+ writemsg("!!! Previously fetched file: "+str(myfile)+"\n!!! Reason: "+reason+"\nRefetching...\n\n")
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ fetched=0
+ else:
+ for x_key in mydigests[myfile].keys():
+ writemsg(">>> Previously fetched file: "+str(myfile)+" "+x_key+" ;-)\n")
+ fetched=2
+ break #No need to keep looking for this file, we have it!
+ else:
+ #we don't have the digest file, but the file exists. Assume it is fully downloaded.
+ fetched=2
+ except (OSError,IOError),e:
+ writemsg("An exception was caught(1)...\nFailing the download: %s.\n" % (str(e)),1)
+ fetched=0
+
+ if not can_fetch:
+ if fetched != 2:
+ if fetched == 0:
+ writemsg("!!! File %s has not been fetched, and we are unable to get it.\n" % myfile)
+ else:
+ writemsg("!!! File %s is only partially fetched, and we are unable to complete it.\n" % myfile)
+ return 0
+ else:
+ continue
+
+ # check if we can actually write to the directory/existing file.
+ if fetched!=2 and os.path.exists(mysettings["DISTDIR"]+"/"+myfile) != \
+ os.access(mysettings["DISTDIR"]+"/"+myfile, os.W_OK) and not fetch_to_ro:
+ writemsg(red("***")+" Lack write access to %s, failing fetch\n" % str(mysettings["DISTDIR"]+"/"+myfile))
+ fetched=0
+ break
+ elif fetched!=2:
+ #we either need to resume or start the download
+ #you can't use "continue" when you're inside a "try" block
+ if fetched==1:
+ #resume mode:
+ writemsg(">>> Resuming download...\n")
+ locfetch=resumecommand
+ else:
+ #normal mode:
+ locfetch=fetchcommand
+ writemsg(">>> Downloading "+str(loc)+"\n")
+ myfetch=string.replace(locfetch,"${URI}",loc)
+ myfetch=string.replace(myfetch,"${FILE}",myfile)
+ try:
+ if selinux_enabled:
+ con=selinux.getcontext()
+ con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_FETCH_T"])
+ selinux.setexec(con)
+ myret=spawn(myfetch,mysettings,free=1, droppriv=("userfetch" in mysettings.features))
+ selinux.setexec(None)
+ else:
+ myret=spawn(myfetch,mysettings,free=1, droppriv=("userfetch" in mysettings.features))
+ finally:
+ #if root, -always- set the perms.
+ if os.path.exists(mysettings["DISTDIR"]+"/"+myfile) and (fetched != 1 or os.getuid() == 0) \
+ and os.access(mysettings["DISTDIR"]+"/",os.W_OK):
+ if os.stat(mysettings["DISTDIR"]+"/"+myfile).st_gid != portage_gid:
+ try:
+ os.chown(mysettings["DISTDIR"]+"/"+myfile,-1,portage_gid)
+ except SystemExit, e:
+ raise
+ except:
+ portage_util.writemsg("chown failed on distfile: " + str(myfile))
+ os.chmod(mysettings["DISTDIR"]+"/"+myfile,0664)
+
+ if mydigests!=None and mydigests.has_key(myfile):
+ try:
+ mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
+ # No exception means the file exists. Let digestcheck() report
+ # size or md5 errors appropriately.
+ if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
+ # Fetch failed... Try the next one... Kill 404 files though.
+ if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
+ html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
+ try:
+ if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
+ try:
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ continue
+ if not fetchonly:
+ fetched=2
+ break
+ else:
+ # File is the correct size--check the MD5 sum for the fetched
+ # file NOW, for those users who don't have a stable/continuous
+ # net connection. This way we have a chance to try to download
+ # from another mirror...
+ verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
+ if not verified_ok:
+ writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n!!! Reason: "+reason+"\nRemoving corrupt distfile...\n")
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ fetched=0
+ else:
+ for x_key in mydigests[myfile].keys():
+ writemsg(">>> "+str(myfile)+" "+x_key+" ;-)\n")
+ fetched=2
+ break
+ except (OSError,IOError),e:
+ writemsg("An exception was caught(2)...\nFailing the download: %s.\n" % (str(e)),1)
+ fetched=0
+ else:
+ if not myret:
+ fetched=2
+ break
+ elif mydigests!=None:
+ writemsg("No digest file available and download failed.\n\n")
+ finally:
+ if use_locks and file_lock:
+ portage_locks.unlockfile(file_lock)
+
+ if listonly:
+ writemsg("\n")
+ if (fetched!=2) and not listonly:
+ writemsg("!!! Couldn't download "+str(myfile)+". Aborting.\n")
+ return 0
+ return 1
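+# Illustration of the mirror:// expansion above: for a SRC_URI entry of
+# "mirror://gnome/foo-1.0.tar.gz", mirrorname is "gnome", so after the
+# GENTOO_MIRRORS distfiles URLs the candidate list gains each entry of
+# custommirrors["gnome"] (if defined) and then each shuffled entry of
+# thirdpartymirrors["gnome"], each with "/foo-1.0.tar.gz" appended.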
+
+
+def digestCreate(myfiles,basedir,oldDigest={}):
+ """Takes a list of files and the directory they are in and returns the
+ dict of dict[filename][CHECKSUM_KEY] = hash
+ returns None on error."""
+ mydigests={}
+ for x in myfiles:
+ print "<<<",x
+ myfile=os.path.normpath(basedir+"///"+x)
+ if os.path.exists(myfile):
+ if not os.access(myfile, os.R_OK):
+ print "!!! Given file does not appear to be readable. Does it exist?"
+ print "!!! File:",myfile
+ return None
+ mydigests[x] = portage_checksum.perform_all(myfile)
+ mysize = os.stat(myfile)[stat.ST_SIZE]
+ else:
+ if x in oldDigest:
+ # DeepCopy because we might not have a unique reference.
+ mydigests[x] = copy.deepcopy(oldDigest[x])
+ mysize = copy.deepcopy(oldDigest[x]["size"])
+ else:
+ print "!!! We have a source URI, but no file..."
+ print "!!! File:",myfile
+ return None
+
+ if mydigests[x].has_key("size") and (mydigests[x]["size"] != mysize):
+ raise portage_exception.DigestException, "Size mismatch during checksums"
+ mydigests[x]["size"] = copy.deepcopy(mysize)
+ return mydigests
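+# The returned structure maps each filename to its checksums plus size,
+# e.g. (hash abbreviated for illustration):
+#
+#   {"foo-1.0.tar.gz": {"MD5": "d41d8cd9...", "size": 294912}}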
+
+def digestCreateLines(filelist, mydict):
+ mylines = []
+ mydigests = copy.deepcopy(mydict)
+ for myarchive in filelist:
+ mysize = mydigests[myarchive]["size"]
+ if len(mydigests[myarchive]) == 0:
+ raise portage_exception.DigestException, "No generate digest for '%(file)s'" % {"file":myarchive}
+ for sumName in mydigests[myarchive].keys():
+ if sumName not in portage_checksum.get_valid_checksum_keys():
+ continue
+ mysum = mydigests[myarchive][sumName]
+
+ myline = sumName[:]
+ myline += " "+mysum
+ myline += " "+myarchive
+ myline += " "+str(mysize)
+ if sumName != "MD5":
+ # XXXXXXXXXXXXXXXX This cannot be used!
+ # Older portage makes very dumb assumptions about the formats.
+ # We need a lead-in period before we break everything.
+ continue
+ mylines.append(myline)
+ return mylines
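+# Each emitted line therefore has the form (only MD5 survives the
+# lead-in filter above):
+#
+#   MD5 <hash> <archive name> <size>
+#
+# which is exactly the format digestParseFile() below reads back.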
+
+def digestgen(myarchives,mysettings,overwrite=1,manifestonly=0):
+ """generates digest file if missing. Assumes all files are available. If
+ overwrite=0, the digest will only be created if it doesn't already exist."""
+
+ # archive files
+ basedir=mysettings["DISTDIR"]+"/"
+ digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
+
+ # portage files -- p(ortagefiles)basedir
+ pbasedir=mysettings["O"]+"/"
+ manifestfn=pbasedir+"Manifest"
+
+ if not manifestonly:
+ if not os.path.isdir(mysettings["FILESDIR"]):
+ os.makedirs(mysettings["FILESDIR"])
+ mycvstree=cvstree.getentries(pbasedir, recursive=1)
+
+ if ("cvs" in features) and os.path.exists(pbasedir+"/CVS"):
+ if not cvstree.isadded(mycvstree,"files"):
+ if "autoaddcvs" in features:
+ print ">>> Auto-adding files/ dir to CVS..."
+ spawn("cd "+pbasedir+"; cvs add files",mysettings,free=1)
+ else:
+ print "--- Warning: files/ is not added to cvs."
+
+ if (not overwrite) and os.path.exists(digestfn):
+ return 1
+
+ print green(">>> Generating digest file...")
+
+ # Track the old digest so we can carry its checksums forward without
+ # requiring all files to be downloaded again.
+ myolddigest = {}
+ if os.path.exists(digestfn):
+ myolddigest = digestParseFile(digestfn)
+
+ mydigests=digestCreate(myarchives, basedir, oldDigest=myolddigest)
+ if mydigests==None: # There was a problem, exit with an errorcode.
+ return 0
+
+ try:
+ outfile=open(digestfn, "w+")
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Filesystem error skipping generation. (Read-Only?)"
+ print "!!!",e
+ return 0
+ for x in digestCreateLines(myarchives, mydigests):
+ outfile.write(x+"\n")
+ outfile.close()
+ try:
+ os.chown(digestfn,os.getuid(),portage_gid)
+ os.chmod(digestfn,0664)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ print e
+
+ print green(">>> Generating manifest file...")
+ mypfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
+ mypfiles=cvstree.apply_cvsignore_filter(mypfiles)
+ for x in ["Manifest"]:
+ if x in mypfiles:
+ mypfiles.remove(x)
+
+ mydigests=digestCreate(mypfiles, pbasedir)
+ if mydigests==None: # There was a problem, exit with an errorcode.
+ return 0
+
+ try:
+ outfile=open(manifestfn, "w+")
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Filesystem error skipping generation. (Read-Only?)"
+ print "!!!",e
+ return 0
+ for x in digestCreateLines(mypfiles, mydigests):
+ outfile.write(x+"\n")
+ outfile.close()
+ try:
+ os.chown(manifestfn,os.getuid(),portage_gid)
+ os.chmod(manifestfn,0664)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ print e
+
+ if "cvs" in features and os.path.exists(pbasedir+"/CVS"):
+ mycvstree=cvstree.getentries(pbasedir, recursive=1)
+ myunaddedfiles=""
+ if not manifestonly and not cvstree.isadded(mycvstree,digestfn):
+ if digestfn[:len(pbasedir)]==pbasedir:
+ myunaddedfiles=digestfn[len(pbasedir):]+" "
+ else:
+ myunaddedfiles=digestfn+" "
+ if not cvstree.isadded(mycvstree,manifestfn[len(pbasedir):]):
+ if manifestfn[:len(pbasedir)]==pbasedir:
+ myunaddedfiles+=manifestfn[len(pbasedir):]+" "
+ else:
+ myunaddedfiles+=manifestfn
+ if myunaddedfiles:
+ if "autoaddcvs" in features:
+ print blue(">>> Auto-adding digest file(s) to CVS...")
+ spawn("cd "+pbasedir+"; cvs add "+myunaddedfiles,mysettings,free=1)
+ else:
+ print "--- Warning: digests are not yet added into CVS."
+ print darkgreen(">>> Computed message digests.")
+ print
+ return 1
+
+
+def digestParseFile(myfilename):
+ """(filename) -- Parses a given file for entries matching:
+ MD5 MD5_STRING_OF_HEX_CHARS FILE_NAME FILE_SIZE
+ Ignores lines that do not begin with 'MD5' and returns a
+ dict with the filenames as keys and [md5,size] as the values."""
+
+ if not os.path.exists(myfilename):
+ return None
+ mylines = portage_util.grabfile(myfilename, compat_level=1)
+
+ mydigests={}
+ for x in mylines:
+ myline=string.split(x)
+ if len(myline) < 4:
+ #invalid line
+ continue
+ if myline[0] not in portage_checksum.get_valid_checksum_keys():
+ continue
+ mykey = myline.pop(0)
+ myhash = myline.pop(0)
+ mysize = long(myline.pop())
+ myfn = string.join(myline, " ")
+ if myfn not in mydigests:
+ mydigests[myfn] = {}
+ mydigests[myfn][mykey] = myhash
+ if "size" in mydigests[myfn]:
+ if mydigests[myfn]["size"] != mysize:
+ raise portage_exception.DigestException, "Conflicting sizes in digest: %(filename)s" % {"filename":myfilename}
+ else:
+ mydigests[myfn]["size"] = mysize
+ return mydigests
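+# Example: a digest line "MD5 d41d8cd9... foo-1.0.tar.gz 294912"
+# (hash abbreviated) parses into:
+#
+#   {"foo-1.0.tar.gz": {"MD5": "d41d8cd9...", "size": 294912L}}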
+
+# XXXX strict was added here to fix a missing name error.
+# XXXX It's used below, but we're not paying attention to how we get it?
+def digestCheckFiles(myfiles, mydigests, basedir, note="", strict=0):
+ """(fileslist, digestdict, basedir) -- Takes a list of files and a dict
+ of their digests and checks the digests against the indicated files in
+ the basedir given. Returns 1 only if all files exist and match the md5s.
+ """
+ for x in myfiles:
+ if not mydigests.has_key(x):
+ print
+ print red("!!! No message digest entry found for file \""+x+".\"")
+ print "!!! Most likely a temporary problem. Try 'emerge sync' again later."
+ print "!!! If you are certain of the authenticity of the file then you may type"
+ print "!!! the following to generate a new digest:"
+ print "!!! ebuild /usr/portage/category/package/package-version.ebuild digest"
+ return 0
+ myfile=os.path.normpath(basedir+"/"+x)
+ if not os.path.exists(myfile):
+ if strict:
+ print "!!! File does not exist:",myfile
+ return 0
+ continue
+
+ ok,reason = portage_checksum.verify_all(myfile,mydigests[x])
+ if not ok:
+ print
+ print red("!!! Digest verification Failed:")
+ print red("!!!")+" "+str(myfile)
+ print red("!!! Reason: ")+reason
+ print
+ return 0
+ else:
+ print ">>> md5 "+note+" ;-)",x
+ return 1
+
+
+def digestcheck(myfiles, mysettings, strict=0):
+ """Checks md5sums. Assumes all files have been downloaded."""
+ # archive files
+ basedir=mysettings["DISTDIR"]+"/"
+ digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
+
+ # portage files -- p(ortagefiles)basedir
+ pbasedir=mysettings["O"]+"/"
+ manifestfn=pbasedir+"Manifest"
+
+ if not (os.path.exists(digestfn) and os.path.exists(manifestfn)):
+ if "digest" in features:
+ print ">>> No package digest/Manifest file found."
+ print ">>> \"digest\" mode enabled; auto-generating new digest..."
+ return digestgen(myfiles,mysettings)
+ else:
+ if not os.path.exists(manifestfn):
+ if strict:
+ print red("!!! No package manifest found:"),manifestfn
+ return 0
+ else:
+ print "--- No package manifest found:",manifestfn
+ if not os.path.exists(digestfn):
+ print "!!! No package digest file found:",digestfn
+ print "!!! Type \"ebuild foo.ebuild digest\" to generate it."
+ return 0
+
+ mydigests=digestParseFile(digestfn)
+ if mydigests==None:
+ print "!!! Failed to parse digest file:",digestfn
+ return 0
+ mymdigests=digestParseFile(manifestfn)
+ if "strict" not in features:
+ # XXX: Remove this when manifests become mainstream.
+ pass
+ elif mymdigests==None:
+ print "!!! Failed to parse manifest file:",manifestfn
+ if strict:
+ return 0
+ else:
+ # Check the portage-related files here.
+ mymfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
+ manifest_files = mymdigests.keys()
+ for x in ["Manifest", "ChangeLog", "metadata.xml"]:
+ while x in mymfiles:
+ mymfiles.remove(x)
+ while x in manifest_files:
+ manifest_files.remove(x)
+ for x in range(len(mymfiles)-1,-1,-1):
+ if mymfiles[x] in manifest_files:
+ manifest_files.remove(mymfiles[x])
+ elif len(cvstree.apply_cvsignore_filter([mymfiles[x]]))==0:
+ # we filter here, rather than above; the manifest might have files flagged by the filter.
+ # if something is returned, then it's flagged as a bad file
+ # manifest doesn't know about it, so we kill it here.
+ del mymfiles[x]
+ else:
+ print red("!!! Security Violation: A file exists that is not in the manifest.")
+ print "!!! File:",mymfiles[x]
+ if strict:
+ return 0
+ if manifest_files and strict:
+ print red("!!! Files listed in the manifest do not exist!")
+ for x in manifest_files:
+ print x
+ return 0
+
+ if not digestCheckFiles(mymfiles, mymdigests, pbasedir, note="files ", strict=strict):
+ if strict:
+ print ">>> Please ensure you have sync'd properly. Please try '"+bold("emerge sync")+"' and"
+ print ">>> optionally examine the file(s) for corruption. "+bold("A sync will fix most cases.")
+ print
+ return 0
+ else:
+ print "--- Manifest check failed. 'strict' not enabled; ignoring."
+ print
+
+ # Just return the status, as it's the last check.
+ return digestCheckFiles(myfiles, mydigests, basedir, note="src_uri", strict=strict)
+
+# parse actionmap to spawn ebuild with the appropriate args
+def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
+ if alwaysdep or ("noauto" not in features):
+ # process dependency first
+ if "dep" in actionmap[mydo].keys():
+ retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
+ if retval:
+ return retval
+ # spawn ebuild.sh
+ mycommand = EBUILD_SH_BINARY + " "
+ if selinux_enabled and ("sesandbox" in features) and (mydo in ["unpack","compile","test","install"]):
+ con=selinux.getcontext()
+ con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_SANDBOX_T"])
+ selinux.setexec(con)
+ retval=spawn(mycommand + mydo,mysettings,debug=debug,
+ free=actionmap[mydo]["args"][0],
+ droppriv=actionmap[mydo]["args"][1],logfile=logfile)
+ selinux.setexec(None)
+ else:
+ retval=spawn(mycommand + mydo,mysettings, debug=debug,
+ free=actionmap[mydo]["args"][0],
+ droppriv=actionmap[mydo]["args"][1],logfile=logfile)
+ return retval
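+# The "dep" links in the actionmap (built in doebuild() below) make this
+# recursive: spawnebuild("compile",actionmap,mysettings,debug) first runs
+# "unpack", which first runs "setup", so phases execute in dependency
+# order unless "noauto" is in FEATURES and alwaysdep is unset.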
+
+def doebuild(myebuild,mydo,myroot,mysettings,debug=0,listonly=0,fetchonly=0,cleanup=0,dbkey=None,use_cache=1,fetchall=0,tree="porttree"):
+ global db
+
+ ebuild_path = os.path.abspath(myebuild)
+ pkg_dir = os.path.dirname(ebuild_path)
+
+ if mysettings.configdict["pkg"].has_key("CATEGORY"):
+ cat = mysettings.configdict["pkg"]["CATEGORY"]
+ else:
+ cat = os.path.basename(os.path.normpath(pkg_dir+"/.."))
+ mypv = os.path.basename(ebuild_path)[:-7]
+ mycpv = cat+"/"+mypv
+
+ mysplit=pkgsplit(mypv,silent=0)
+ if mysplit==None:
+ writemsg("!!! Error: PF is null '%s'; exiting.\n" % mypv)
+ return 1
+
+ if mydo != "depend":
+ # XXX: We're doing a little hack here to curtail the gvisible locking
+ # XXX: that creates a deadlock... Really need to isolate that.
+ mysettings.reset(use_cache=use_cache)
+ mysettings.setcpv(mycpv,use_cache=use_cache)
+
+ validcommands = ["help","clean","prerm","postrm","preinst","postinst",
+ "config","setup","depend","fetch","digest",
+ "unpack","compile","test","install","rpm","qmerge","merge",
+ "package","unmerge", "manifest"]
+
+ if mydo not in validcommands:
+ validcommands.sort()
+ writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo)
+ for vcount in range(len(validcommands)):
+ if vcount%6 == 0:
+ writemsg("\n!!! ")
+ writemsg(string.ljust(validcommands[vcount], 11))
+ writemsg("\n")
+ return 1
+
+ if not os.path.exists(myebuild):
+ writemsg("!!! doebuild: "+str(myebuild)+" not found for "+str(mydo)+"\n")
+ return 1
+
+ if debug: # Otherwise it overrides emerge's settings.
+ # We have no other way to set debug... debug can't be passed in
+ # due to how it's coded... Don't overwrite this so we can use it.
+ mysettings["PORTAGE_DEBUG"]=str(debug)
+
+ mysettings["ROOT"] = myroot
+ mysettings["STARTDIR"] = getcwd()
+
+ mysettings["EBUILD"] = ebuild_path
+ mysettings["O"] = pkg_dir
+ mysettings["CATEGORY"] = cat
+ mysettings["FILESDIR"] = pkg_dir+"/files"
+ mysettings["PF"] = mypv
+
+ mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
+ mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
+
+ mysettings["PROFILE_PATHS"] = string.join(mysettings.profiles,"\n")+"\n"+CUSTOM_PROFILE_PATH
+ mysettings["P"] = mysplit[0]+"-"+mysplit[1]
+ mysettings["PN"] = mysplit[0]
+ mysettings["PV"] = mysplit[1]
+ mysettings["PR"] = mysplit[2]
+
+ if mydo != "depend":
+ try:
+ mysettings["INHERITED"], mysettings["RESTRICT"] = db[root][tree].dbapi.aux_get( \
+ mycpv,["INHERITED","RESTRICT"])
+ mysettings["PORTAGE_RESTRICT"]=string.join(flatten(portage_dep.use_reduce(portage_dep.paren_reduce( \
+ mysettings["RESTRICT"]), uselist=mysettings["USE"].split())),' ')
+ except SystemExit, e:
+ raise
+ except:
+ pass
+
+ if mysplit[2] == "r0":
+ mysettings["PVR"]=mysplit[1]
+ else:
+ mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
+
+ mysettings["SLOT"]=""
+
+ if mysettings.has_key("PATH"):
+ mysplit=string.split(mysettings["PATH"],":")
+ else:
+ mysplit=[]
+ if PORTAGE_BIN_PATH not in mysplit:
+ mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
+
+
+ mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
+ mysettings["HOME"] = mysettings["BUILD_PREFIX"]+"/homedir"
+ mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/portage-pkg"
+ mysettings["BUILDDIR"] = mysettings["BUILD_PREFIX"]+"/"+mysettings["PF"]
+
+ mysettings["PORTAGE_BASHRC"] = EBUILD_SH_ENV_FILE
+
+ #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
+ if (mydo!="depend") or not mysettings.has_key("KV"):
+ mykv,err1=ExtractKernelVersion(root+"usr/src/linux")
+ if mykv:
+ # Regular source tree
+ mysettings["KV"]=mykv
+ else:
+ mysettings["KV"]=""
+
+ if (mydo!="depend") or not mysettings.has_key("KVERS"):
+ myso=os.uname()[2]
+ mysettings["KVERS"]=myso
+
+
+ # get possible slot information from the deps file
+ if mydo=="depend":
+ if mysettings.has_key("PORTAGE_DEBUG") and mysettings["PORTAGE_DEBUG"]=="1":
+ # XXX: This needs to use a FD for saving the output into a file.
+ # XXX: Set this up through spawn
+ pass
+ writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
+ if dbkey:
+ mysettings["dbkey"] = dbkey
+ else:
+ mysettings["dbkey"] = mysettings.depcachedir+"/aux_db_key_temp"
+
+ retval = spawn(EBUILD_SH_BINARY+" depend",mysettings)
+ return retval
+
+ logfile=None
+ # Build directory creation isn't required for any of these.
+ if mydo not in ["fetch","digest","manifest"]:
+
+ if not os.path.exists(mysettings["BUILD_PREFIX"]):
+ os.makedirs(mysettings["BUILD_PREFIX"])
+ if (os.getuid() == 0):
+ os.chown(mysettings["BUILD_PREFIX"],portage_uid,portage_gid)
+ os.chmod(mysettings["BUILD_PREFIX"],00775)
+
+ # Should be ok again to set $T, as sandbox does not depend on it
+ mysettings["T"]=mysettings["BUILDDIR"]+"/temp"
+ if cleanup or mydo=="clean":
+ if os.path.exists(mysettings["T"]):
+ shutil.rmtree(mysettings["T"])
+ if not os.path.exists(mysettings["T"]):
+ os.makedirs(mysettings["T"])
+ if (os.getuid() == 0):
+ os.chown(mysettings["T"],portage_uid,portage_gid)
+ os.chmod(mysettings["T"],02770)
+
+ try: # XXX: negative RESTRICT
+ if not (("nouserpriv" in string.split(mysettings["PORTAGE_RESTRICT"])) or \
+ ("userpriv" in string.split(mysettings["PORTAGE_RESTRICT"]))):
+ if ("userpriv" in features) and (portage_uid and portage_gid):
+ if (secpass==2):
+ if os.path.exists(mysettings["HOME"]):
+ # XXX: Potentially bad, but held down by HOME replacement above.
+ spawn("rm -Rf "+mysettings["HOME"],mysettings, free=1)
+ if not os.path.exists(mysettings["HOME"]):
+ os.makedirs(mysettings["HOME"])
+ elif ("userpriv" in features):
+ print "!!! Disabling userpriv from features... Portage UID/GID not valid."
+ del features[features.index("userpriv")]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Couldn't empty HOME:",mysettings["HOME"]
+ print "!!!",e
+
+ try:
+ # no reason to check for depend since depend returns above.
+ if not os.path.exists(mysettings["BUILD_PREFIX"]):
+ os.makedirs(mysettings["BUILD_PREFIX"])
+ if (os.getuid() == 0):
+ os.chown(mysettings["BUILD_PREFIX"],portage_uid,portage_gid)
+ if not os.path.exists(mysettings["BUILDDIR"]):
+ os.makedirs(mysettings["BUILDDIR"])
+ if (os.getuid() == 0):
+ os.chown(mysettings["BUILDDIR"],portage_uid,portage_gid)
+ except OSError, e:
+ print "!!! File system problem. (ReadOnly? Out of space?)"
+ print "!!! Perhaps: rm -Rf",mysettings["BUILD_PREFIX"]
+ print "!!!",str(e)
+ return 1
+
+ try:
+ if not os.path.exists(mysettings["HOME"]):
+ os.makedirs(mysettings["HOME"])
+ if (os.getuid() == 0):
+ os.chown(mysettings["HOME"],portage_uid,portage_gid)
+ os.chmod(mysettings["HOME"],02770)
+ except OSError, e:
+ print "!!! File system problem. (ReadOnly? Out of space?)"
+ print "!!! Failed to create fake home directory in BUILDDIR"
+ print "!!!",str(e)
+ return 1
+
+ try:
+ if ("ccache" in features):
+ if (not mysettings.has_key("CCACHE_DIR")) or (mysettings["CCACHE_DIR"]==""):
+ mysettings["CCACHE_DIR"]=mysettings["PORTAGE_TMPDIR"]+"/ccache"
+ if not os.path.exists(mysettings["CCACHE_DIR"]):
+ os.makedirs(mysettings["CCACHE_DIR"])
+ mystat = os.stat(mysettings["CCACHE_DIR"])
+ if ("userpriv" in features):
+ if mystat[stat.ST_UID] != portage_uid or ((mystat[stat.ST_MODE]&02070)!=02070):
+ writemsg("* Adjusting permissions on ccache in %s\n" % mysettings["CCACHE_DIR"])
+ spawn("chgrp -R "+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("chown "+str(portage_uid)+":"+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("chmod -R ug+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("find "+mysettings["CCACHE_DIR"]+" -type d -exec chmod g+s \{\} \;", mysettings, free=1)
+ else:
+ if mystat[stat.ST_UID] != 0 or ((mystat[stat.ST_MODE]&02070)!=02070):
+ writemsg("* Adjusting permissions on ccache in %s\n" % mysettings["CCACHE_DIR"])
+ spawn("chgrp -R "+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("chown 0:"+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("chmod -R ug+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("find "+mysettings["CCACHE_DIR"]+" -type d -exec chmod g+s \{\} \;", mysettings, free=1)
+ except OSError, e:
+ print "!!! File system problem. (ReadOnly? Out of space?)"
+ print "!!! Perhaps: rm -Rf",mysettings["BUILD_PREFIX"]
+ print "!!!",str(e)
+ return 1
+
+ #try:
+ # mystat=os.stat(mysettings["CCACHE_DIR"])
+ # if (mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&02070)!=02070):
+ # print "*** Adjusting ccache permissions for portage user..."
+ # os.chown(mysettings["CCACHE_DIR"],portage_uid,portage_gid)
+ # os.chmod(mysettings["CCACHE_DIR"],02770)
+ # spawn("chown -R "+str(portage_uid)+":"+str(portage_gid)+" "+mysettings["CCACHE_DIR"],mysettings, free=1)
+ # spawn("chmod -R g+rw "+mysettings["CCACHE_DIR"],mysettings, free=1)
+ #except SystemExit, e:
+ # raise
+ #except:
+ # pass
+
+ if "distcc" in features:
+ try:
+ if (not mysettings.has_key("DISTCC_DIR")) or (mysettings["DISTCC_DIR"]==""):
+ mysettings["DISTCC_DIR"]=mysettings["PORTAGE_TMPDIR"]+"/portage/.distcc"
+ if not os.path.exists(mysettings["DISTCC_DIR"]):
+ os.makedirs(mysettings["DISTCC_DIR"])
+ os.chown(mysettings["DISTCC_DIR"],portage_uid,portage_gid)
+ os.chmod(mysettings["DISTCC_DIR"],02775)
+ for x in ("/lock", "/state"):
+ if not os.path.exists(mysettings["DISTCC_DIR"]+x):
+ os.mkdir(mysettings["DISTCC_DIR"]+x)
+ os.chown(mysettings["DISTCC_DIR"]+x,portage_uid,portage_gid)
+ os.chmod(mysettings["DISTCC_DIR"]+x,02775)
+ except OSError, e:
+ writemsg("\n!!! File system problem when setting DISTCC_DIR directory permissions.\n")
+ writemsg( "!!! DISTCC_DIR="+str(mysettings["DISTCC_DIR"]+"\n"))
+ writemsg( "!!! "+str(e)+"\n\n")
+ time.sleep(5)
+ features.remove("distcc")
+ mysettings["DISTCC_DIR"]=""
+
+ mysettings["WORKDIR"]=mysettings["BUILDDIR"]+"/work"
+ mysettings["D"]=mysettings["BUILDDIR"]+"/image/"
+
+ if mysettings.has_key("PORT_LOGDIR"):
+ if os.access(mysettings["PORT_LOGDIR"]+"/",os.W_OK):
+ try:
+ os.chown(mysettings["PORT_LOGDIR"],portage_uid,portage_gid)
+ os.chmod(mysettings["PORT_LOGDIR"],02770)
+ if not mysettings.has_key("LOG_PF") or (mysettings["LOG_PF"] != mysettings["PF"]):
+ mysettings["LOG_PF"]=mysettings["PF"]
+ mysettings["LOG_COUNTER"]=str(db[myroot]["vartree"].dbapi.get_counter_tick_core("/"))
+ logfile="%s/%s-%s.log" % (mysettings["PORT_LOGDIR"],mysettings["LOG_COUNTER"],mysettings["LOG_PF"])
+ except ValueError, e:
+ mysettings["PORT_LOGDIR"]=""
+ print "!!! Unable to chown/chmod PORT_LOGDIR. Disabling logging."
+ print "!!!",e
+ else:
+ print "!!! Cannot create log... No write access / Does not exist"
+ print "!!! PORT_LOGDIR:",mysettings["PORT_LOGDIR"]
+ mysettings["PORT_LOGDIR"]=""
+
+ if mydo=="unmerge":
+ return unmerge(mysettings["CATEGORY"],mysettings["PF"],myroot,mysettings)
+
+ # if any of these are being called, handle them -- running them out of the sandbox -- and stop now.
+ if mydo=="clean":
+ logfile=None
+ if mydo in ["help","clean","setup"]:
+ return spawn(EBUILD_SH_BINARY+" "+mydo,mysettings,debug=debug,free=1,logfile=logfile)
+ elif mydo in ["prerm","postrm","preinst","postinst","config"]:
+ mysettings.load_infodir(pkg_dir)
+ return spawn(EBUILD_SH_BINARY+" "+mydo,mysettings,debug=debug,free=1,logfile=logfile)
+
+ try:
+ mysettings["SLOT"],mysettings["RESTRICT"] = db["/"]["porttree"].dbapi.aux_get(mycpv,["SLOT","RESTRICT"])
+ except (IOError,KeyError):
+ print red("doebuild():")+" aux_get() error reading "+mycpv+"; aborting."
+ sys.exit(1)
+
+ newuris, alist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=mysettings)
+ alluris, aalist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=mysettings,all=1)
+ mysettings["A"]=string.join(alist," ")
+ mysettings["AA"]=string.join(aalist," ")
+ if ("mirror" in features) or fetchall:
+ fetchme=alluris[:]
+ checkme=aalist[:]
+ elif mydo=="digest":
+ fetchme=alluris[:]
+ checkme=aalist[:]
+ digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
+ if os.path.exists(digestfn):
+ mydigests=digestParseFile(digestfn)
+ if mydigests:
+ for x in mydigests:
+ while x in checkme:
+ i = checkme.index(x)
+ del fetchme[i]
+ del checkme[i]
+ else:
+ fetchme=newuris[:]
+ checkme=alist[:]
+
+ try:
+ if not os.path.exists(mysettings["DISTDIR"]):
+ os.makedirs(mysettings["DISTDIR"])
+ if not os.path.exists(mysettings["DISTDIR"]+"/cvs-src"):
+ os.makedirs(mysettings["DISTDIR"]+"/cvs-src")
+ except OSError, e:
+ print "!!! File system problem. (Bad Symlink?)"
+ print "!!! Fetching may fail:",str(e)
+
+ try:
+ mystat=os.stat(mysettings["DISTDIR"]+"/cvs-src")
+ if ((mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&02770)!=02770)) and not listonly:
+ print "*** Adjusting cvs-src permissions for portage user..."
+ os.chown(mysettings["DISTDIR"]+"/cvs-src",0,portage_gid)
+ os.chmod(mysettings["DISTDIR"]+"/cvs-src",02770)
+ spawn("chgrp -R "+str(portage_gid)+" "+mysettings["DISTDIR"]+"/cvs-src", free=1)
+ spawn("chmod -R g+rw "+mysettings["DISTDIR"]+"/cvs-src", free=1)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+
+ if mydo!="manifest" and not fetch(fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
+ return 1
+
+ if mydo=="fetch" and listonly:
+ return 0
+
+ if "digest" in features:
+ #generate digest if it doesn't exist.
+ if mydo=="digest":
+ return (not digestgen(aalist,mysettings,overwrite=1))
+ else:
+ digestgen(aalist,mysettings,overwrite=0)
+ elif mydo=="digest":
+ #since we are calling "digest" directly, recreate the digest even if it already exists
+ return (not digestgen(aalist,mysettings,overwrite=1))
+ if mydo=="manifest":
+ return (not digestgen(aalist,mysettings,overwrite=1,manifestonly=1))
+
+ if not digestcheck(checkme, mysettings, ("strict" in features)):
+ return 1
+
+ if mydo=="fetch":
+ return 0
+
+ #initial dep checks complete; time to process main commands
+
+ nosandbox=(("userpriv" in features) and ("usersandbox" not in features) and \
+ ("userpriv" not in mysettings["RESTRICT"]) and ("nouserpriv" not in mysettings["RESTRICT"]))
+ if nosandbox and ("userpriv" not in features or "userpriv" in mysettings["RESTRICT"] or \
+ "nouserpriv" in mysettings["RESTRICT"]):
+ nosandbox = ("sandbox" not in features and "usersandbox" not in features)
+ actionmap={
+ "depend": { "args":(0,1)}, # sandbox / portage
+ "setup": { "args":(1,0)}, # without / root
+ "unpack": {"dep":"setup", "args":(0,1)}, # sandbox / portage
+ "compile": {"dep":"unpack", "args":(nosandbox,1)}, # optional / portage
+ "test": {"dep":"compile", "args":(nosandbox,1)}, # optional / portage
+ "install": {"dep":"test", "args":(0,0)}, # sandbox / root
+ "rpm": {"dep":"install", "args":(0,0)}, # sandbox / root
+ "package": {"dep":"install", "args":(0,0)}, # sandbox / root
+ }
+
+ if mydo in actionmap.keys():
+ if mydo=="package":
+ for x in ["","/"+mysettings["CATEGORY"],"/All"]:
+ if not os.path.exists(mysettings["PKGDIR"]+x):
+ os.makedirs(mysettings["PKGDIR"]+x)
+ # REBUILD CODE FOR TBZ2 --- XXXX
+ return spawnebuild(mydo,actionmap,mysettings,debug,logfile=logfile)
+ elif mydo=="qmerge":
+ #check to ensure install was run. this *only* pops up when users forget it and are using ebuild
+ if not os.path.exists(mysettings["BUILDDIR"]+"/.installed"):
+ print "!!! mydo=qmerge, but install phase hasn't been ran"
+ sys.exit(1)
+ #qmerge is specifically not supposed to do a runtime dep check
+ return merge(mysettings["CATEGORY"],mysettings["PF"],mysettings["D"],mysettings["BUILDDIR"]+"/build-info",myroot,mysettings)
+ elif mydo=="merge":
+ retval=spawnebuild("install",actionmap,mysettings,debug,alwaysdep=1,logfile=logfile)
+ if retval:
+ return retval
+ return merge(mysettings["CATEGORY"],mysettings["PF"],mysettings["D"],mysettings["BUILDDIR"]+"/build-info",myroot,mysettings,myebuild=mysettings["EBUILD"])
+ else:
+ print "!!! Unknown mydo:",mydo
+ sys.exit(1)
+
+expandcache={}
+
+def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
+ """moves a file from src to dest, preserving all permissions and attributes; mtime will
+ be preserved even when moving across filesystems. Returns true on success and false on
+ failure. Move is atomic."""
+ #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
+ global lchown
+
+ try:
+ if not sstat:
+ sstat=os.lstat(src)
+ if bsd_chflags:
+ sflags=bsd_chflags.lgetflags(src)
+ if sflags < 0:
+ # Problem getting flags...
+ writemsg("!!! Couldn't get flags for "+dest+"\n")
+ return None
+
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Stating source file failed... movefile()"
+ print "!!!",e
+ return None
+
+ destexists=1
+ try:
+ dstat=os.lstat(dest)
+ except SystemExit, e:
+ raise
+ except:
+ dstat=os.lstat(os.path.dirname(dest))
+ destexists=0
+
+ if bsd_chflags:
+ # Check that we can actually unset schg etc flags...
+ # Clear the flags on source and destination; we'll reinstate them after merging
+ if(destexists):
+ if bsd_chflags.lchflags(dest, 0) < 0:
+ writemsg("!!! Couldn't clear flags on file being merged: \n ")
+ # We might have an immutable flag on the parent dir; save and clear.
+ pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
+ bsd_chflags.lchflags(os.path.dirname(dest), 0)
+
+ # Don't bother checking the return value here; if it fails then the next line will catch it.
+ bsd_chflags.lchflags(src, 0)
+
+ if bsd_chflags.lhasproblems(src)>0 or (destexists and bsd_chflags.lhasproblems(dest)>0) or bsd_chflags.lhasproblems(os.path.dirname(dest))>0:
+ # This is bad: we can't merge the file with these flags set.
+ writemsg("!!! Can't merge file "+dest+" because of flags set\n")
+ return None
+
+ if destexists:
+ if stat.S_ISLNK(dstat[stat.ST_MODE]):
+ try:
+ os.unlink(dest)
+ destexists=0
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ try:
+ target=os.readlink(src)
+ if mysettings and mysettings["D"]:
+ if target.find(mysettings["D"])==0:
+ target=target[len(mysettings["D"]):]
+ if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
+ os.unlink(dest)
+ if selinux_enabled:
+ sid = selinux.get_lsid(src)
+ selinux.secure_symlink(target,dest,sid)
+ else:
+ os.symlink(target,dest)
+ lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ if bsd_chflags:
+ # Restore the flags we saved before moving
+ if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
+ writemsg("!!! Couldn't restore flags ("+str(flags)+") on " + dest+":\n")
+ writemsg("!!! %s\n" % str(e))
+ return None
+ return os.lstat(dest)[stat.ST_MTIME]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! failed to properly create symlink:"
+ print "!!!",dest,"->",target
+ print "!!!",e
+ return None
+
+ renamefailed=1
+ if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
+ try:
+ if selinux_enabled:
+ ret=selinux.secure_rename(src,dest)
+ else:
+ ret=os.rename(src,dest)
+ renamefailed=0
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ import errno
+ if e[0]!=errno.EXDEV:
+ # Some random error.
+ print "!!! Failed to move",src,"to",dest
+ print "!!!",e
+ return None
+ # Invalid cross-device-link 'bind' mounted or actually Cross-Device
+ if renamefailed:
+ didcopy=0
+ if stat.S_ISREG(sstat[stat.ST_MODE]):
+ try: # For safety copy then move it over.
+ if selinux_enabled:
+ selinux.secure_copy(src,dest+"#new")
+ selinux.secure_rename(dest+"#new",dest)
+ else:
+ shutil.copyfile(src,dest+"#new")
+ os.rename(dest+"#new",dest)
+ didcopy=1
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print '!!! copy',src,'->',dest,'failed.'
+ print "!!!",e
+ return None
+ else:
+ #we don't yet handle special, so we need to fall back to /bin/mv
+ if selinux_enabled:
+ a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
+ else:
+ a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
+ if a[0]!=0:
+ print "!!! Failed to move special file:"
+ print "!!! '"+src+"' to '"+dest+"'"
+ print "!!!",a
+ return None # failure
+ try:
+ if didcopy:
+ lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
+ os.unlink(src)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Failed to chown/chmod/unlink in movefile()"
+ print "!!!",dest
+ print "!!!",e
+ return None
+
+ if newmtime:
+ os.utime(dest,(newmtime,newmtime))
+ else:
+ os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
+ newmtime=sstat[stat.ST_MTIME]
+
+ if bsd_chflags:
+ # Restore the flags we saved before moving
+ if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
+ writemsg("!!! Couldn't restore flags ("+str(sflags)+") on " + dest+":\n")
+ return None
+
+ return newmtime
+
+def merge(mycat,mypkg,pkgloc,infloc,myroot,mysettings,myebuild=None):
+ mylink=dblink(mycat,mypkg,myroot,mysettings)
+ return mylink.merge(pkgloc,infloc,myroot,myebuild)
+
+def unmerge(cat,pkg,myroot,mysettings,mytrimworld=1):
+ mylink=dblink(cat,pkg,myroot,mysettings)
+ if mylink.exists():
+ mylink.unmerge(trimworld=mytrimworld,cleanup=1)
+ mylink.delete()
+
+def relparse(myver):
+ "converts last version part into three components"
+ number=0
+ suffix=0
+ endtype=0
+ endnumber=0
+
+ mynewver=string.split(myver,"_")
+ myver=mynewver[0]
+
+ #normal number or number with letter at end
+ divider=len(myver)-1
+ if myver[divider:] not in "1234567890":
+ #letter at end
+ suffix=ord(myver[divider:])
+ number=string.atof(myver[0:divider])
+ else:
+ number=string.atof(myver)
+
+ if len(mynewver)==2:
+ #an endversion
+ for x in endversion_keys:
+ elen=len(x)
+ if mynewver[1][:elen] == x:
+ match=1
+ endtype=endversion[x]
+ try:
+ endnumber=string.atof(mynewver[1][elen:])
+ except SystemExit, e:
+ raise
+ except:
+ endnumber=0
+ break
+ return [number,suffix,endtype,endnumber]
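+# Examples (illustrative):
+#   relparse("2")        ==> [2.0, 0, 0, 0]
+#   relparse("2a")       ==> [2.0, 97, 0, 0]   # 97 == ord('a')
+#   relparse("1_alpha3") ==> [1.0, 0, endversion["alpha"], 3.0]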
+
+#returns 1 if valid version string, else 0
+# valid string in format: <v1>.<v2>...<vx>[a-z,_{endversion}[vy]]
+# ververify doesn't do package rev.
+
+vercache={}
+def ververify(myorigval,silent=1):
+ try:
+ return vercache[myorigval]
+ except KeyError:
+ pass
+ if len(myorigval)==0:
+ if not silent:
+ print "!!! Name error: package contains empty \"-\" part."
+ return 0
+ myval=string.split(myorigval,'.')
+ if len(myval)==0:
+ if not silent:
+ print "!!! Name error: empty version string."
+ vercache[myorigval]=0
+ return 0
+ #all but the last version must be a numeric
+ for x in myval[:-1]:
+ if not len(x):
+ if not silent:
+ print "!!! Name error in",myorigval+": two decimal points in a row"
+ vercache[myorigval]=0
+ return 0
+ try:
+ foo=int(x)
+ except SystemExit, e:
+ raise
+ except:
+ if not silent:
+ print "!!! Name error in",myorigval+": \""+x+"\" is not a valid version component."
+ vercache[myorigval]=0
+ return 0
+ if not len(myval[-1]):
+ if not silent:
+ print "!!! Name error in",myorigval+": two decimal points in a row"
+ vercache[myorigval]=0
+ return 0
+ try:
+ foo=int(myval[-1])
+ vercache[myorigval]=1
+ return 1
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ #ok, our last component is not a plain number or blank, let's continue
+ if myval[-1][-1] in string.lowercase:
+ try:
+ foo=int(myval[-1][:-1])
+ vercache[myorigval]=1
+ return 1
+ # 1a, 2.0b, etc.
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ #ok, maybe we have a 1_alpha or 1_beta2; let's see
+ #ep="endpart"
+ ep=string.split(myval[-1],"_")
+ if len(ep)!=2:
+ if not silent:
+ print "!!! Name error in",myorigval
+ vercache[myorigval]=0
+ return 0
+ try:
+ foo=int(ep[0][-1])
+ chk=ep[0]
+ except SystemExit, e:
+ raise
+ except:
+ # it's ok if the last char is not numeric, e.g. foo-1.0.0a_pre1
+ chk=ep[0][:-1]
+
+ try:
+ foo=int(chk)
+ except SystemExit, e:
+ raise
+ except:
+ #this needs to be numeric or numeric+single letter,
+ #i.e. the "1" in "1_alpha" or "1a_alpha"
+ if not silent:
+ print "!!! Name error in",myorigval+": characters before _ must be numeric or numeric+single letter"
+ vercache[myorigval]=0
+ return 0
+ for mye in endversion_keys:
+ if ep[1][0:len(mye)]==mye:
+ if len(mye)==len(ep[1]):
+ #no trailing numeric; ok
+ vercache[myorigval]=1
+ return 1
+ else:
+ try:
+ foo=int(ep[1][len(mye):])
+ vercache[myorigval]=1
+ return 1
+ except SystemExit, e:
+ raise
+ except:
+ #if no endversions work, *then* we return 0
+ pass
+ if not silent:
+ print "!!! Name error in",myorigval
+ vercache[myorigval]=0
+ return 0
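+# Examples (illustrative):
+#   ververify("1.0.2")      ==> 1
+#   ververify("1.0.2a")     ==> 1   # one trailing letter is allowed
+#   ververify("1.0_alpha1") ==> 1
+#   ververify("1..2")       ==> 0   # two decimal points in a row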
+
+def isvalidatom(atom):
+ mycpv_cps = catpkgsplit(dep_getcpv(atom))
+ operator = get_operator(atom)
+ if operator:
+ if mycpv_cps and mycpv_cps[0] != "null":
+ # >=cat/pkg-1.0
+ return 1
+ else:
+ # >=cat/pkg or >=pkg-1.0 (no category)
+ return 0
+ if mycpv_cps:
+ # cat/pkg-1.0
+ return 0
+
+ if (len(string.split(atom, '/'))==2):
+ # cat/pkg
+ return 1
+ else:
+ return 0
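+# Examples (illustrative):
+#   isvalidatom(">=sys-apps/portage-2.0") ==> 1
+#   isvalidatom("sys-apps/portage")       ==> 1
+#   isvalidatom("sys-apps/portage-2.0")   ==> 0   # version but no operator
+#   isvalidatom(">=portage-2.0")          ==> 0   # operator but no category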
+
+def isjustname(mypkg):
+ myparts=string.split(mypkg,'-')
+ for x in myparts:
+ if ververify(x):
+ return 0
+ return 1
+
+iscache={}
+def isspecific(mypkg):
+ "now supports packages with no category"
+ try:
+ return iscache[mypkg]
+ except KeyError:
+ pass
+ mysplit=string.split(mypkg,"/")
+ if not isjustname(mysplit[-1]):
+ iscache[mypkg]=1
+ return 1
+ iscache[mypkg]=0
+ return 0
+
+# This function can be used as a package verification function, i.e.
+# pkgsplit("foo-1.2-r1") will return None if foo-1.2-r1 isn't a valid
+# package (with version) name. If it is a valid name, pkgsplit will
+# return a list containing: [ pkgname, pkgversion(norev), pkgrev ].
+# For foo-1.2-r1, this list would be [ "foo", "1.2", "r1" ]. For
+# Mesa-3.0, this list would be [ "Mesa", "3.0", "r0" ].
+pkgcache={}
+
+def pkgsplit(mypkg,silent=1):
+ try:
+ if not pkgcache[mypkg]:
+ return None
+ return pkgcache[mypkg][:]
+ except KeyError:
+ pass
+ myparts=string.split(mypkg,'-')
+ if len(myparts)<2:
+ if not silent:
+ print "!!! Name error in",mypkg+": missing a version or name part."
+ pkgcache[mypkg]=None
+ return None
+ for x in myparts:
+ if len(x)==0:
+ if not silent:
+ print "!!! Name error in",mypkg+": empty \"-\" part."
+ pkgcache[mypkg]=None
+ return None
+ #verify rev
+ revok=0
+ myrev=myparts[-1]
+ if len(myrev) and myrev[0]=="r":
+ try:
+ int(myrev[1:])
+ revok=1
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ if revok:
+ if ververify(myparts[-2]):
+ if len(myparts)==2:
+ pkgcache[mypkg]=None
+ return None
+ else:
+ for x in myparts[:-2]:
+ if ververify(x):
+ pkgcache[mypkg]=None
+ return None
+ #names can't have versiony looking parts
+ myval=[string.join(myparts[:-2],"-"),myparts[-2],myparts[-1]]
+ pkgcache[mypkg]=myval
+ return myval
+ else:
+ pkgcache[mypkg]=None
+ return None
+
+ elif ververify(myparts[-1],silent=silent):
+ if len(myparts)==1:
+ if not silent:
+ print "!!! Name error in",mypkg+": missing name part."
+ pkgcache[mypkg]=None
+ return None
+ else:
+ for x in myparts[:-1]:
+ if ververify(x):
+ if not silent:
+ print "!!! Name error in",mypkg+": multiple version parts."
+ pkgcache[mypkg]=None
+ return None
+ myval=[string.join(myparts[:-1],"-"),myparts[-1],"r0"]
+ pkgcache[mypkg]=myval[:]
+ return myval
+ else:
+ pkgcache[mypkg]=None
+ return None
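+# Examples (illustrative):
+#   pkgsplit("foo-1.2-r1") ==> ["foo", "1.2", "r1"]
+#   pkgsplit("Mesa-3.0")   ==> ["Mesa", "3.0", "r0"]
+#   pkgsplit("foo-1.2-1")  ==> None   # a revision must look like "rN"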
+
+def getCPFromCPV(mycpv):
+ """Calls pkgsplit on a cpv and returns only the cp."""
+ return pkgsplit(mycpv)[0]
+
+catcache={}
+def catpkgsplit(mydata,silent=1):
+ "returns [cat, pkgname, version, rev ]"
+ try:
+ if not catcache[mydata]:
+ return None
+ return catcache[mydata][:]
+ except KeyError:
+ pass
+ mysplit=mydata.split("/")
+ p_split=None
+ if len(mysplit)==1:
+ retval=["null"]
+ p_split=pkgsplit(mydata,silent=silent)
+ elif len(mysplit)==2:
+ retval=[mysplit[0]]
+ p_split=pkgsplit(mysplit[1],silent=silent)
+ if not p_split:
+ catcache[mydata]=None
+ return None
+ retval.extend(p_split)
+ catcache[mydata]=retval
+ return retval
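+
+# For illustration, with hypothetical packages:
+#   catpkgsplit("sys-apps/foo-1.0-r1") -> ["sys-apps", "foo", "1.0", "r1"]
+#   catpkgsplit("foo-1.0")             -> ["null", "foo", "1.0", "r0"]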
+
+# vercmp:
+# This takes two version strings and returns an integer: zero if the
+# versions are the same, positive if val1>val2, and negative if val2>val1.
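+# For illustration: vercmp("1.0","1.1") is negative, vercmp("1.1","1.1") is
+# zero, and vercmp("1.2","1.1") is positive.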
+vcmpcache={}
+def vercmp(val1,val2):
+ if val1==val2:
+ #quick short-circuit
+ return 0
+ valkey=val1+" "+val2
+ try:
+ return vcmpcache[valkey]
+ except KeyError:
+ pass
+ try:
+ # the reversed pair may already be cached; if so, negate its result
+ return -vcmpcache[val2+" "+val1]
+ except KeyError:
+ pass
+
+ # consider 1_p2 vs 1.1:
+ # after expansion they would become (1_p2,0) vs (1,1), and 1_p2 would
+ # be compared with 1 before 0 is compared with 1.
+ # to avoid that bug we convert to (1,0_p2) by splitting off the
+ # _prepart and adding it back _after_ expansion
+ val1_prepart = val2_prepart = ''
+ if val1.count('_'):
+ val1, val1_prepart = val1.split('_', 1)
+ if val2.count('_'):
+ val2, val2_prepart = val2.split('_', 1)
+
+ # replace '-' by '.'
+ # FIXME: Is it needed? can val1/2 contain '-'?
+ val1=string.split(val1,'-')
+ if len(val1)==2:
+ val1[0]=val1[0]+"."+val1[1]
+ val2=string.split(val2,'-')
+ if len(val2)==2:
+ val2[0]=val2[0]+"."+val2[1]
+
+ val1=string.split(val1[0],'.')
+ val2=string.split(val2[0],'.')
+
+ #add back the decimal point so that ".03" does not become "3"
+ for x in range(1,len(val1)):
+ if val1[x][0] == '0' :
+ val1[x]='.' + val1[x]
+ for x in range(1,len(val2)):
+ if val2[x][0] == '0' :
+ val2[x]='.' + val2[x]
+
+ # extend version numbers
+ if len(val2)<len(val1):
+ val2.extend(["0"]*(len(val1)-len(val2)))
+ elif len(val1)<len(val2):
+ val1.extend(["0"]*(len(val2)-len(val1)))
+
+ # add back _prepart tails
+ if val1_prepart:
+ val1[-1] += '_' + val1_prepart
+ if val2_prepart:
+ val2[-1] += '_' + val2_prepart
+ #both versions now have the same number of components, so we can
+ #compare them pairwise
+ for x in range(0,len(val1)):
+ cmp1=relparse(val1[x])
+ cmp2=relparse(val2[x])
+ for y in range(0,4):
+ myret=cmp1[y]-cmp2[y]
+ if myret != 0:
+ vcmpcache[valkey]=myret
+ return myret
+ vcmpcache[valkey]=0
+ return 0
+
+
+def pkgcmp(pkg1,pkg2):
+ """Returns a negative value if pkg2 is newer than pkg1, zero if they are equal,
+ and a positive value if pkg1 is newer. Returns None if the package names differ."""
+ if pkg1[0] != pkg2[0]:
+ return None
+ mycmp=vercmp(pkg1[1],pkg2[1])
+ if mycmp>0:
+ return 1
+ if mycmp<0:
+ return -1
+ r1=int(pkg1[2][1:])
+ r2=int(pkg2[2][1:])
+ if r1>r2:
+ return 1
+ if r2>r1:
+ return -1
+ return 0
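+
+# For illustration, using hypothetical pkgsplit()-style [name, version, rev] lists:
+#   pkgcmp(["foo", "1.2", "r1"], ["foo", "1.2", "r0"]) -> 1  (newer revision)
+#   pkgcmp(["foo", "1.0", "r0"], ["bar", "1.0", "r0"]) -> None  (names differ)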
+
+def dep_parenreduce(mysplit,mypos=0):
+ "Accepts a list of strings, and converts '(' and ')' surrounded items to sub-lists"
+ while (mypos<len(mysplit)):
+ if (mysplit[mypos]=="("):
+ firstpos=mypos
+ mypos=mypos+1
+ while (mypos<len(mysplit)):
+ if mysplit[mypos]==")":
+ mysplit[firstpos:mypos+1]=[mysplit[firstpos+1:mypos]]
+ mypos=firstpos
+ break
+ elif mysplit[mypos]=="(":
+ #recurse
+ mysplit=dep_parenreduce(mysplit,mypos=mypos)
+ mypos=mypos+1
+ mypos=mypos+1
+ return mysplit
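+
+# For illustration, with a hypothetical token list:
+#   dep_parenreduce(["||", "(", "a/b", "c/d", ")"]) -> ["||", ["a/b", "c/d"]]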
+
+def dep_opconvert(mysplit,myuse,mysettings):
+ "Does dependency operator conversion"
+
+ #check_config_instance(mysettings)
+
+ mypos=0
+ newsplit=[]
+ while mypos<len(mysplit):
+ if type(mysplit[mypos])==types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos],myuse,mysettings))
+ mypos += 1
+ elif mysplit[mypos]==")":
+ #mismatched paren, error
+ return None
+ elif mysplit[mypos]=="||":
+ if ((mypos+1)>=len(mysplit)) or (type(mysplit[mypos+1])!=types.ListType):
+ # || must be followed by paren'd list
+ return None
+ try:
+ mynew=dep_opconvert(mysplit[mypos+1],myuse,mysettings)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Unable to satisfy OR dependency:",string.join(mysplit," || ")
+ raise e
+ mynew[0:0]=["||"]
+ newsplit.append(mynew)
+ mypos += 2
+ elif mysplit[mypos][-1]=="?":
+ #USE conditional, i.e. "gnome? ( foo bar )"
+ #this is a quick and dirty hack so that repoman can enable all USE vars:
+ if (len(myuse)==1) and (myuse[0]=="*") and mysettings:
+ # enable it even if it's ! (for repoman) but kill it if it's
+ # an arch variable that isn't for this arch. XXX Sparc64?
+ k=mysplit[mypos][:-1]
+ if k[0]=="!":
+ k=k[1:]
+ if k not in archlist and k not in mysettings.usemask:
+ enabled=1
+ elif k in archlist:
+ if k==mysettings["ARCH"]:
+ if mysplit[mypos][0]=="!":
+ enabled=0
+ else:
+ enabled=1
+ elif mysplit[mypos][0]=="!":
+ enabled=1
+ else:
+ enabled=0
+ else:
+ enabled=0
+ else:
+ if mysplit[mypos][0]=="!":
+ myusevar=mysplit[mypos][1:-1]
+ if myusevar in myuse:
+ enabled=0
+ else:
+ enabled=1
+ else:
+ myusevar=mysplit[mypos][:-1]
+ if myusevar in myuse:
+ enabled=1
+ else:
+ enabled=0
+ if (mypos+2<len(mysplit)) and (mysplit[mypos+2]==":"):
+ #colon mode
+ if enabled:
+ #choose the first option
+ if type(mysplit[mypos+1])==types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos+1],myuse,mysettings))
+ else:
+ newsplit.append(mysplit[mypos+1])
+ else:
+ #choose the alternate option
+ if type(mysplit[mypos+3])==types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos+3],myuse,mysettings))
+ else:
+ newsplit.append(mysplit[mypos+3])
+ mypos += 4
+ else:
+ #normal use mode
+ if enabled:
+ if type(mysplit[mypos+1])==types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos+1],myuse,mysettings))
+ else:
+ newsplit.append(mysplit[mypos+1])
+ #otherwise, continue.
+ mypos += 2
+ else:
+ #normal item
+ newsplit.append(mysplit[mypos])
+ mypos += 1
+ return newsplit
+
+def dep_virtual(mysplit, mysettings):
+ "Does virtual dependency conversion"
+
+ newsplit=[]
+ for x in mysplit:
+ if type(x)==types.ListType:
+ newsplit.append(dep_virtual(x, mysettings))
+ else:
+ mykey=dep_getkey(x)
+ if mysettings.virtuals.has_key(mykey):
+ if len(mysettings.virtuals[mykey])==1:
+ a=string.replace(x, mykey, mysettings.virtuals[mykey][0])
+ else:
+ if x[0]=="!":
+ # blocker needs "and" not "or(||)".
+ a=[]
+ else:
+ a=['||']
+ for y in mysettings.virtuals[mykey]:
+ a.append(string.replace(x, mykey, y))
+ newsplit.append(a)
+ else:
+ newsplit.append(x)
+ return newsplit
+
+def dep_eval(deplist):
+ if len(deplist)==0:
+ return 1
+ if deplist[0]=="||":
+ #or list; we just need one "1"
+ for x in deplist[1:]:
+ if type(x)==types.ListType:
+ if dep_eval(x)==1:
+ return 1
+ elif x==1:
+ return 1
+ return 0
+ else:
+ for x in deplist:
+ if type(x)==types.ListType:
+ if dep_eval(x)==0:
+ return 0
+ elif x==0 or x==2:
+ return 0
+ return 1
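+
+# For illustration: dep_eval(["||", 0, 1]) -> 1 (one satisfied alternative is
+# enough), while dep_eval([1, 0]) -> 0 (an AND list requires every element).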
+
+def dep_zapdeps(unreduced,reduced,vardbapi=None,use_binaries=0):
+ """Takes an unreduced and reduced deplist and removes satisfied dependencies.
+ Returned deplist contains steps that must be taken to satisfy dependencies."""
+ writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
+ if unreduced==[] or unreduced==['||'] :
+ return []
+ if unreduced[0]=="||":
+ if dep_eval(reduced):
+ #deps satisfied, return empty list.
+ return []
+ else:
+ #try to find an installed dep.
+ ### We use fakedb when --update is given now, so we can't use the local vardbapi here.
+ ### This should be fixed in the future.
+ ### see bug 45468.
+ ##if vardbapi:
+ ## mydbapi=vardbapi
+ ##else:
+ ## mydbapi=db[root]["vartree"].dbapi
+ mydbapi=db[root]["vartree"].dbapi
+
+ if db["/"].has_key("porttree"):
+ myportapi=db["/"]["porttree"].dbapi
+ else:
+ myportapi=None
+
+ if use_binaries and db["/"].has_key("bintree"):
+ mybinapi=db["/"]["bintree"].dbapi
+ writemsg("Using bintree...\n",2)
+ else:
+ mybinapi=None
+
+ x=1
+ candidate=[]
+ while x<len(reduced):
+ writemsg("x: %s, reduced[x]: %s\n" % (x,reduced[x]), 2)
+ if (type(reduced[x])==types.ListType):
+ newcand = dep_zapdeps(unreduced[x], reduced[x], vardbapi=vardbapi, use_binaries=use_binaries)
+ candidate.append(newcand)
+ else:
+ if (reduced[x]==False):
+ candidate.append([unreduced[x]])
+ else:
+ candidate.append([])
+ x+=1
+
+ #use installed and unmasked package(s) in the portage tree.
+ for x in candidate:
+ match=1
+ for pkg in x:
+ if not mydbapi.match(pkg):
+ match=0
+ break
+ if myportapi:
+ if not myportapi.match(pkg):
+ match=0
+ break
+ if match:
+ writemsg("Installed match: %s\n" % (x), 2)
+ return x
+
+ # Use binary packages if available.
+ if mybinapi:
+ for x in candidate:
+ match=1
+ for pkg in x:
+ if not mybinapi.match(pkg):
+ match=0
+ break
+ else:
+ writemsg("Binary match: %s\n" % (pkg), 2)
+ if match:
+ writemsg("Binary match final: %s\n" % (x), 2)
+ return x
+
+ #use unmasked package(s) in the portage tree
+ if myportapi:
+ for x in candidate:
+ match=1
+ for pkg in x:
+ if not myportapi.match(pkg):
+ match=0
+ break
+ if match:
+ writemsg("Porttree match: %s\n" % (x), 2)
+ return x
+
+ #none of the unmasked packages match, so use the first candidate
+ writemsg("Last resort candidate: %s\n" % (candidate[0]), 2)
+ return candidate[0]
+ else:
+ if dep_eval(reduced):
+ #deps satisfied, return empty list.
+ return []
+ else:
+ returnme=[]
+ x=0
+ while x<len(reduced):
+ if type(reduced[x])==types.ListType:
+ returnme+=dep_zapdeps(unreduced[x],reduced[x], vardbapi=vardbapi, use_binaries=use_binaries)
+ else:
+ if reduced[x]==False:
+ returnme.append(unreduced[x])
+ x += 1
+ return returnme
+
+def dep_getkey(mydep):
+ if not len(mydep):
+ return mydep
+ if mydep[0]=="*":
+ mydep=mydep[1:]
+ if mydep[-1]=="*":
+ mydep=mydep[:-1]
+ if mydep[0]=="!":
+ mydep=mydep[1:]
+ if mydep[:2] in [ ">=", "<=" ]:
+ mydep=mydep[2:]
+ elif mydep[:1] in "=<>~":
+ mydep=mydep[1:]
+ if isspecific(mydep):
+ mysplit=catpkgsplit(mydep)
+ if not mysplit:
+ return mydep
+ return mysplit[0]+"/"+mysplit[1]
+ else:
+ return mydep
+
+def dep_getcpv(mydep):
+ if not len(mydep):
+ return mydep
+ if mydep[0]=="*":
+ mydep=mydep[1:]
+ if mydep[-1]=="*":
+ mydep=mydep[:-1]
+ if mydep[0]=="!":
+ mydep=mydep[1:]
+ if mydep[:2] in [ ">=", "<=" ]:
+ mydep=mydep[2:]
+ elif mydep[:1] in "=<>~":
+ mydep=mydep[1:]
+ return mydep
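+
+# For illustration, with a hypothetical atom:
+#   dep_getkey(">=sys-apps/foo-1.0") -> "sys-apps/foo"
+#   dep_getcpv(">=sys-apps/foo-1.0") -> "sys-apps/foo-1.0"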
+
+def cpv_getkey(mycpv):
+ myslash=mycpv.split("/")
+ mysplit=pkgsplit(myslash[-1])
+ mylen=len(myslash)
+ if mylen==2:
+ return myslash[0]+"/"+mysplit[0]
+ elif mylen==1:
+ return mysplit[0]
+ else:
+ return mysplit
+
+def key_expand(mykey,mydb=None,use_cache=1):
+ mysplit=mykey.split("/")
+ if len(mysplit)==1:
+ if mydb and type(mydb)==types.InstanceType:
+ for x in settings.categories:
+ if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
+ return x+"/"+mykey
+ if virts_p.has_key(mykey):
+ return(virts_p[mykey][0])
+ return "null/"+mykey
+ elif mydb:
+ if type(mydb)==types.InstanceType:
+ if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
+ return virts[mykey][0]
+ return mykey
+
+def cpv_expand(mycpv,mydb=None,use_cache=1):
+ """Given a string (packagename or virtual) expand it into a valid
+ cat/package string. Virtuals use the mydb to determine which provided
+ virtual is a valid choice and defaults to the first element when there
+ are no installed/available candidates."""
+ myslash=mycpv.split("/")
+ mysplit=pkgsplit(myslash[-1])
+ if len(myslash)>2:
+ # this is an illegal case.
+ mysplit=[]
+ mykey=mycpv
+ elif len(myslash)==2:
+ if mysplit:
+ mykey=myslash[0]+"/"+mysplit[0]
+ else:
+ mykey=mycpv
+ if mydb:
+ writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
+ if type(mydb)==types.InstanceType:
+ if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
+ writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
+ mykey_orig = mykey[:]
+ for vkey in virts[mykey]:
+ if mydb.cp_list(vkey,use_cache=use_cache):
+ mykey = vkey
+ writemsg("virts chosen: %s\n" % (mykey), 1)
+ break
+ if mykey == mykey_orig:
+ mykey=virts[mykey][0]
+ writemsg("virts defaulted: %s\n" % (mykey), 1)
+ #we only perform virtual expansion if we are passed a dbapi
+ else:
+ #specific cpv, no category, i.e. "foo-1.0"
+ if mysplit:
+ myp=mysplit[0]
+ else:
+ # "foo" ?
+ myp=mycpv
+ mykey=None
+ matches=[]
+ if mydb:
+ for x in settings.categories:
+ if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
+ matches.append(x+"/"+myp)
+ if (len(matches)>1):
+ raise ValueError, matches
+ elif matches:
+ mykey=matches[0]
+
+ if not mykey and type(mydb)!=types.ListType:
+ if virts_p.has_key(myp):
+ mykey=virts_p[myp][0]
+ #again, we only perform virtual expansion if we have a dbapi (not a list)
+ if not mykey:
+ mykey="null/"+myp
+ if mysplit:
+ if mysplit[2]=="r0":
+ return mykey+"-"+mysplit[1]
+ else:
+ return mykey+"-"+mysplit[1]+"-"+mysplit[2]
+ else:
+ return mykey
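+
+# For illustration, assuming no db is passed and "foo" is not a known virtual:
+#   cpv_expand("foo-1.0") -> "null/foo-1.0"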
+
+def dep_transform(mydep,oldkey,newkey):
+ origdep=mydep
+ if not len(mydep):
+ return mydep
+ if mydep[0]=="*":
+ mydep=mydep[1:]
+ prefix=""
+ postfix=""
+ if mydep[-1]=="*":
+ mydep=mydep[:-1]
+ postfix="*"
+ if mydep[:2] in [ ">=", "<=" ]:
+ prefix=mydep[:2]
+ mydep=mydep[2:]
+ elif mydep[:1] in "=<>~!":
+ prefix=mydep[:1]
+ mydep=mydep[1:]
+ if mydep==oldkey:
+ return prefix+newkey+postfix
+ else:
+ return origdep
+
+def dep_expand(mydep,mydb=None,use_cache=1):
+ if not len(mydep):
+ return mydep
+ if mydep[0]=="*":
+ mydep=mydep[1:]
+ prefix=""
+ postfix=""
+ if mydep[-1]=="*":
+ mydep=mydep[:-1]
+ postfix="*"
+ if mydep[:2] in [ ">=", "<=" ]:
+ prefix=mydep[:2]
+ mydep=mydep[2:]
+ elif mydep[:1] in "=<>~!":
+ prefix=mydep[:1]
+ mydep=mydep[1:]
+ return prefix+cpv_expand(mydep,mydb=mydb,use_cache=use_cache)+postfix
+
+def dep_check(depstring,mydbapi,mysettings,use="yes",mode=None,myuse=None,use_cache=1,use_binaries=0):
+ """Takes a depend string and parses the condition."""
+
+ #check_config_instance(mysettings)
+
+ if use=="all":
+ #enable everything (for repoman)
+ myusesplit=["*"]
+ elif use=="yes":
+ if myuse==None:
+ #default behavior
+ myusesplit = string.split(mysettings["USE"])
+ else:
+ myusesplit = myuse
+ # We've been given useflags to use.
+ #print "USE FLAGS PASSED IN."
+ #print myuse
+ #if "bindist" in myusesplit:
+ # print "BINDIST is set!"
+ #else:
+ # print "BINDIST NOT set."
+ else:
+ #we are being run by autouse(), don't consult USE vars yet.
+ # WE ALSO CANNOT USE SETTINGS
+ myusesplit=[]
+
+ #convert parenthesis to sublists
+ mysplit = portage_dep.paren_reduce(depstring)
+
+ if mysettings:
+ # XXX: use="all" is only used by repoman. Why would repoman checks want
+ # profile-masked USE flags to be enabled?
+ #if use=="all":
+ # mymasks=archlist[:]
+ #else:
+ mymasks=mysettings.usemask+archlist[:]
+
+ while mysettings["ARCH"] in mymasks:
+ del mymasks[mymasks.index(mysettings["ARCH"])]
+ mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,masklist=mymasks,matchall=(use=="all"),excludeall=[mysettings["ARCH"]])
+ else:
+ mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,matchall=(use=="all"))
+
+ # Do the || conversions
+ mysplit=portage_dep.dep_opconvert(mysplit)
+
+ #convert virtual dependencies to normal packages.
+ mysplit=dep_virtual(mysplit, mysettings)
+ #if mysplit==None, then we have a parse error (paren mismatch or misplaced ||)
+ #up until here, we haven't needed to look at the database tree
+
+ if mysplit==None:
+ return [0,"Parse Error (parentheses mismatch?)"]
+ elif mysplit==[]:
+ #dependencies were reduced to nothing
+ return [1,[]]
+ mysplit2=mysplit[:]
+ mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
+ if mysplit2==None:
+ return [0,"Invalid token"]
+
+ writemsg("\n\n\n", 1)
+ writemsg("mysplit: %s\n" % (mysplit), 1)
+ writemsg("mysplit2: %s\n" % (mysplit2), 1)
+ myeval=dep_eval(mysplit2)
+ writemsg("myeval: %s\n" % (myeval), 1)
+
+ if myeval:
+ return [1,[]]
+ else:
+ myzaps = dep_zapdeps(mysplit,mysplit2,vardbapi=mydbapi,use_binaries=use_binaries)
+ mylist = flatten(myzaps)
+ writemsg("myzaps: %s\n" % (myzaps), 1)
+ writemsg("mylist: %s\n" % (mylist), 1)
+ #remove duplicates
+ mydict={}
+ for x in mylist:
+ mydict[x]=1
+ writemsg("mydict: %s\n" % (mydict), 1)
+ return [1,mydict.keys()]
+
+def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
+ "Reduces the deplist to ones and zeros"
+ mypos=0
+ deplist=mydeplist[:]
+ while mypos<len(deplist):
+ if type(deplist[mypos])==types.ListType:
+ #recurse
+ deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
+ elif deplist[mypos]=="||":
+ pass
+ else:
+ mykey = dep_getkey(deplist[mypos])
+ if mysettings and mysettings.pprovideddict.has_key(mykey) and \
+ match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
+ deplist[mypos]=True
+ else:
+ if mode:
+ mydep=mydbapi.xmatch(mode,deplist[mypos])
+ else:
+ mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
+ if mydep!=None:
+ tmp=(len(mydep)>=1)
+ if deplist[mypos][0]=="!":
+ #tmp=not tmp
+ # This is ad-hoc code and should be rewritten later.. (See #52377)
+ # The reason is that portage now uses fakedb with the --update option,
+ # so portage considers that a blocked package doesn't exist even when
+ # it actually exists. Then bug #52377 happens.
+ # ==== start
+ # emerge checks whether it's a blocker or not, so we can always set tmp=False.
+ # but it's not clean..
+ tmp=False
+ # ==== end
+ deplist[mypos]=tmp
+ else:
+ #encountered invalid string
+ return None
+ mypos=mypos+1
+ return deplist
+
+def getmaskingreason(mycpv):
+ global portdb
+ mysplit = catpkgsplit(mycpv)
+ if not mysplit:
+ raise ValueError("invalid CPV: %s" % mycpv)
+ if not portdb.cpv_exists(mycpv):
+ raise KeyError("CPV %s does not exist" % mycpv)
+ mycp=mysplit[0]+"/"+mysplit[1]
+
+ if settings.pmaskdict.has_key(mycp):
+ for x in settings.pmaskdict[mycp]:
+ if mycpv in portdb.xmatch("match-all", x):
+ pmaskfile = open(settings["PORTDIR"]+"/profiles/package.mask")
+ comment = ""
+ l = "\n"
+ while len(l) > 0:
+ l = pmaskfile.readline()
+ if len(l) == 0:
+ pmaskfile.close()
+ return None
+ if l[0] == "#":
+ comment += l
+ elif l == "\n":
+ comment = ""
+ elif l.strip() == x:
+ pmaskfile.close()
+ return comment
+ pmaskfile.close()
+ return None
+
+def getmaskingstatus(mycpv):
+ global portdb
+ mysplit = catpkgsplit(mycpv)
+ if not mysplit:
+ raise ValueError("invalid CPV: %s" % mycpv)
+ if not portdb.cpv_exists(mycpv):
+ raise KeyError("CPV %s does not exist" % mycpv)
+ mycp=mysplit[0]+"/"+mysplit[1]
+
+ rValue = []
+
+ # profile checking
+ revmaskdict=settings.prevmaskdict
+ if revmaskdict.has_key(mycp):
+ for x in revmaskdict[mycp]:
+ if x[0]=="*":
+ myatom = x[1:]
+ else:
+ myatom = x
+ if not match_to_list(mycpv, [myatom]):
+ rValue.append("profile")
+ break
+
+ # package.mask checking
+ maskdict=settings.pmaskdict
+ unmaskdict=settings.punmaskdict
+ if maskdict.has_key(mycp):
+ for x in maskdict[mycp]:
+ if mycpv in portdb.xmatch("match-all", x):
+ unmask=0
+ if unmaskdict.has_key(mycp):
+ for z in unmaskdict[mycp]:
+ if mycpv in portdb.xmatch("match-all",z):
+ unmask=1
+ break
+ if unmask==0:
+ rValue.append("package.mask")
+
+ # keywords checking
+ mygroups = portdb.aux_get(mycpv, ["KEYWORDS"])[0].split()
+ pgroups=groups[:]
+ myarch = settings["ARCH"]
+ pkgdict = settings.pkeywordsdict
+
+ cp = dep_getkey(mycpv)
+ if pkgdict.has_key(cp):
+ matches = match_to_list(mycpv, pkgdict[cp].keys())
+ for match in matches:
+ pgroups.extend(pkgdict[cp][match])
+
+ kmask = "missing"
+
+ for keyword in pgroups:
+ if keyword in mygroups:
+ kmask=None
+
+ if kmask:
+ fallback = None
+ for gp in mygroups:
+ if gp=="*":
+ kmask=None
+ break
+ elif gp=="-*":
+ fallback="-*"
+ elif gp=="-"+myarch:
+ kmask="-"+myarch
+ break
+ elif gp=="~"+myarch:
+ kmask="~"+myarch
+ break
+ if kmask == "missing" and fallback:
+ kmask = fallback
+
+ if kmask:
+ rValue.append(kmask+" keyword")
+ return rValue
+
+def fixdbentries(old_value, new_value, dbdir):
+ """python replacement for the fixdbentries script, replaces old_value
+ with new_value for package names in files in dbdir."""
+ # escape once, up front, so the pattern isn't re-escaped for every file
+ old_value_re = re.escape(old_value)
+ for myfile in [f for f in os.listdir(dbdir) if not f == "CONTENTS"]:
+ f = open(dbdir+"/"+myfile, "r")
+ mycontent = f.read()
+ f.close()
+ if not mycontent.count(old_value):
+ continue
+ mycontent = re.sub(old_value_re+"$", new_value, mycontent)
+ mycontent = re.sub(old_value_re+"(\\s)", new_value+"\\1", mycontent)
+ mycontent = re.sub(old_value_re+"(-[^a-zA-Z])", new_value+"\\1", mycontent)
+ mycontent = re.sub(old_value_re+"([^a-zA-Z0-9-])", new_value+"\\1", mycontent)
+ f = open(dbdir+"/"+myfile, "w")
+ f.write(mycontent)
+ f.close()
+
+class packagetree:
+ def __init__(self,virtual,clone=None):
+ if clone:
+ self.tree=clone.tree.copy()
+ self.populated=clone.populated
+ self.virtual=clone.virtual
+ self.dbapi=None
+ else:
+ self.tree={}
+ self.populated=0
+ self.virtual=virtual
+ self.dbapi=None
+
+ def resolve_key(self,mykey):
+ return key_expand(mykey,mydb=self.dbapi)
+
+ def dep_nomatch(self,mypkgdep):
+ mykey=dep_getkey(mypkgdep)
+ nolist=self.dbapi.cp_list(mykey)
+ mymatch=self.dbapi.match(mypkgdep)
+ if not mymatch:
+ return nolist
+ for x in mymatch:
+ if x in nolist:
+ nolist.remove(x)
+ return nolist
+
+ def depcheck(self,mycheck,use="yes",myusesplit=None):
+ return dep_check(mycheck,self.dbapi,use=use,myuse=myusesplit)
+
+ def populate(self):
+ "populates the tree with values"
+ self.populated=1
+
+def best(mymatches):
+ "accepts None arguments; assumes matches are valid."
+ global bestcount
+ if mymatches==None:
+ return ""
+ if not len(mymatches):
+ return ""
+ bestmatch=mymatches[0]
+ p2=catpkgsplit(bestmatch)[1:]
+ for x in mymatches[1:]:
+ p1=catpkgsplit(x)[1:]
+ if pkgcmp(p1,p2)>0:
+ bestmatch=x
+ p2=catpkgsplit(bestmatch)[1:]
+ return bestmatch
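+
+# For illustration, with hypothetical matches:
+#   best(["sys-apps/foo-1.0", "sys-apps/foo-1.2"]) -> "sys-apps/foo-1.2"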
+
+def match_to_list(mypkg,mylist):
+ """(pkgname,list)
+ Searches the list for entries that match the package.
+ """
+ matches=[]
+ for x in mylist:
+ if match_from_list(x,[mypkg]):
+ if x not in matches:
+ matches.append(x)
+ return matches
+
+def best_match_to_list(mypkg,mylist):
+ """(pkgname,list)
+ Returns the most specific entry (assumed to be the longest one)
+ that matches the package given.
+ """
+ # XXX Assumption is wrong sometimes.
+ maxlen = 0
+ bestm = None
+ for x in match_to_list(mypkg,mylist):
+ if len(x) > maxlen:
+ maxlen = len(x)
+ bestm = x
+ return bestm
+
+def catsplit(mydep):
+ return mydep.split("/", 1)
+
+def get_operator(mydep):
+ """
+ returns '~', '=', '>', '<', '=*', '>=', or '<=', or None if mydep has no operator prefix
+ """
+ if mydep[0] == "~":
+ operator = "~"
+ elif mydep[0] == "=":
+ if mydep[-1] == "*":
+ operator = "=*"
+ else:
+ operator = "="
+ elif mydep[0] in "><":
+ if len(mydep) > 1 and mydep[1] == "=":
+ operator = mydep[0:2]
+ else:
+ operator = mydep[0]
+ else:
+ operator = None
+
+ return operator
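+
+# For illustration, with hypothetical atoms:
+#   get_operator(">=sys-apps/foo-1.0") -> ">="
+#   get_operator("=sys-apps/foo-1.0*") -> "=*"
+#   get_operator("sys-apps/foo")       -> None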
+
+
+def match_from_list(mydep,candidate_list):
+ if mydep[0] == "!":
+ mydep = mydep[1:]
+
+ mycpv = dep_getcpv(mydep)
+ mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
+
+ if not mycpv_cps:
+ cat,pkg = catsplit(mycpv)
+ ver = None
+ rev = None
+ else:
+ cat,pkg,ver,rev = mycpv_cps
+ if mydep == mycpv:
+ raise KeyError, "Specific key requires an operator (%s) (try adding an '=')" % (mydep)
+
+ if ver and rev:
+ operator = get_operator(mydep)
+ if not operator:
+ writemsg("!!! Invalid atom: %s\n" % mydep)
+ return []
+ else:
+ operator = None
+
+ mylist = []
+
+ if operator == None:
+ for x in candidate_list:
+ xs = pkgsplit(x)
+ if xs == None:
+ if x != mycpv:
+ continue
+ elif xs[0] != mycpv:
+ continue
+ mylist.append(x)
+
+ elif operator == "=": # Exact match
+ if mycpv in candidate_list:
+ mylist = [mycpv]
+
+ elif operator == "=*": # glob match
+ # The old version ignored _tag suffixes... This one doesn't.
+ for x in candidate_list:
+ if x[0:len(mycpv)] == mycpv:
+ mylist.append(x)
+
+ elif operator == "~": # version, any revision, match
+ for x in candidate_list:
+ xs = catpkgsplit(x)
+ if xs[0:2] != mycpv_cps[0:2]:
+ continue
+ if xs[2] != ver:
+ continue
+ mylist.append(x)
+
+ elif operator in [">", ">=", "<", "<="]:
+ for x in candidate_list:
+ try:
+ result = pkgcmp(pkgsplit(x), [cat+"/"+pkg,ver,rev])
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("\nInvalid package name: %s\n" % x)
+ sys.exit(73)
+ if result == None:
+ continue
+ elif operator == ">":
+ if result > 0:
+ mylist.append(x)
+ elif operator == ">=":
+ if result >= 0:
+ mylist.append(x)
+ elif operator == "<":
+ if result < 0:
+ mylist.append(x)
+ elif operator == "<=":
+ if result <= 0:
+ mylist.append(x)
+ else:
+ raise KeyError, "Unknown operator: %s" % mydep
+ else:
+ raise KeyError, "Unknown operator: %s" % mydep
+
+
+ return mylist
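+
+# For illustration, with hypothetical candidates:
+#   match_from_list(">=sys-apps/foo-1.1", ["sys-apps/foo-1.0", "sys-apps/foo-1.2"])
+#   -> ["sys-apps/foo-1.2"]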
+
+
+def match_from_list_original(mydep,mylist):
+ """(dep,list)
+ Reduces the list down to those that fit the dep
+ """
+ mycpv=dep_getcpv(mydep)
+ if isspecific(mycpv):
+ cp_key=catpkgsplit(mycpv)
+ if cp_key==None:
+ return []
+ else:
+ cp_key=None
+ #Otherwise, this is a special call; we can only select from the ebuilds given in mylist
+ if (mydep[0]=="="):
+ if cp_key==None:
+ return []
+ if mydep[-1]=="*":
+ #example: "=sys-apps/foo-1.0*"
+ try:
+ #now, we grab the version of our dependency...
+ mynewsplit=string.split(cp_key[2],'.')
+ #split it...
+ mynewsplit[-1]=str(int(mynewsplit[-1])+1)
+ #and increment the last digit of the version by one.
+ #We don't need to worry about _pre and friends because they're not supported with '*' deps.
+ new_v=string.join(mynewsplit,".")+"_alpha0"
+ #new_v will be used later in the code when we do our comparisons using pkgcmp()
+ except SystemExit, e:
+ raise
+ except:
+ #erp, error.
+ return []
+ mynodes=[]
+ cmp1=cp_key[1:]
+ cmp1[1]=cmp1[1]+"_alpha0"
+ cmp2=[cp_key[1],new_v,"r0"]
+ for x in mylist:
+ cp_x=catpkgsplit(x)
+ if cp_x==None:
+ #hrm, invalid entry. Continue.
+ continue
+ #skip entries in our list that do not have matching categories
+ if cp_key[0]!=cp_x[0]:
+ continue
+ # ok, categories match. Continue to next step.
+ if ((pkgcmp(cp_x[1:],cmp1)>=0) and (pkgcmp(cp_x[1:],cmp2)<0)):
+ # entry is >= the version specified in our dependency, and < the version in our dep + 1; add it:
+ mynodes.append(x)
+ return mynodes
+ else:
+ # Does our stripped key appear literally in our list? If so, we have a match; if not, we don't.
+ if mycpv in mylist:
+ return [mycpv]
+ else:
+ return []
+ elif (mydep[0]==">") or (mydep[0]=="<"):
+ if cp_key==None:
+ return []
+ if (len(mydep)>1) and (mydep[1]=="="):
+ cmpstr=mydep[0:2]
+ else:
+ cmpstr=mydep[0]
+ mynodes=[]
+ for x in mylist:
+ cp_x=catpkgsplit(x)
+ if cp_x==None:
+ #invalid entry; continue.
+ continue
+ if cp_key[0]!=cp_x[0]:
+ continue
+ if eval("pkgcmp(cp_x[1:],cp_key[1:])"+cmpstr+"0"):
+ mynodes.append(x)
+ return mynodes
+ elif mydep[0]=="~":
+ if cp_key==None:
+ return []
+ myrev=-1
+ for x in mylist:
+ cp_x=catpkgsplit(x)
+ if cp_x==None:
+ #invalid entry; continue
+ continue
+ if cp_key[0]!=cp_x[0]:
+ continue
+ if cp_key[2]!=cp_x[2]:
+ #if version doesn't match, skip it
+ continue
+ myint = int(cp_x[3][1:])
+ if myint > myrev:
+ myrev = myint
+ mymatch = x
+ if myrev == -1:
+ return []
+ else:
+ return [mymatch]
+ elif cp_key==None:
+ if mydep[0]=="!":
+ return []
+ #we check ! deps in emerge itself, so always returning [] is correct.
+ mynodes=[]
+ cp_key=mycpv.split("/")
+ for x in mylist:
+ cp_x=catpkgsplit(x)
+ if cp_x==None:
+ #invalid entry; continue
+ continue
+ if cp_key[0]!=cp_x[0]:
+ continue
+ if cp_key[1]!=cp_x[1]:
+ continue
+ mynodes.append(x)
+ return mynodes
+ else:
+ return []
+
+
+class portagetree:
+ def __init__(self,root="/",virtual=None,clone=None):
+ global portdb
+ if clone:
+ self.root=clone.root
+ self.portroot=clone.portroot
+ self.pkglines=clone.pkglines
+ else:
+ self.root=root
+ self.portroot=settings["PORTDIR"]
+ self.virtual=virtual
+ self.dbapi=portdb
+
+ def dep_bestmatch(self,mydep):
+ "compatibility method"
+ mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
+ if mymatch==None:
+ return ""
+ return mymatch
+
+ def dep_match(self,mydep):
+ "compatibility method"
+ mymatch=self.dbapi.xmatch("match-visible",mydep)
+ if mymatch==None:
+ return []
+ return mymatch
+
+ def exists_specific(self,cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallnodes(self):
+ """new behavior: these are all *unmasked* nodes. There may or may not be
+ an available masked package for each node in this list."""
+ return self.dbapi.cp_all()
+
+ def getname(self,pkgname):
+ "returns file location for this particular package (DEPRECATED)"
+ if not pkgname:
+ return ""
+ mysplit=string.split(pkgname,"/")
+ psplit=pkgsplit(mysplit[1])
+ return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
+
+ def resolve_specific(self,myspec):
+ cps=catpkgsplit(myspec)
+ if not cps:
+ return None
+ mykey=key_expand(cps[0]+"/"+cps[1],mydb=self.dbapi)
+ mykey=mykey+"-"+cps[2]
+ if cps[3]!="r0":
+ mykey=mykey+"-"+cps[3]
+ return mykey
+
+ def depcheck(self,mycheck,use="yes",myusesplit=None):
+ return dep_check(mycheck,self.dbapi,use=use,myuse=myusesplit)
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+ return myslot
+
+
+class dbapi:
+ def __init__(self):
+ pass
+
+ def close_caches(self):
+ pass
+
+ def cp_list(self,cp,use_cache=1):
+ return
+
+ def aux_get(self,mycpv,mylist):
+ """stub code for returning auxiliary db information, such as SLOT, DEPEND, etc.
+ input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]
+ return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found
+ """
+ raise NotImplementedError
+
+ def match(self,origdep,use_cache=1):
+ mydep=dep_expand(origdep,mydb=self)
+ mykey=dep_getkey(mydep)
+ mycat=mykey.split("/")[0]
+ return match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
+
+ def match2(self,mydep,mykey,mylist):
+ writemsg("DEPRECATED: dbapi.match2\n")
+ return match_from_list(mydep,mylist)
+
+ def counter_tick(self,myroot,mycpv=None):
+ return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
+
+ def get_counter_tick_core(self,myroot,mycpv=None):
+ return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
+
+ def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
+ "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
+ cpath=myroot+"var/cache/edb/counter"
+ changed=0
+ min_counter = 0
+ if mycpv:
+ mysplit = pkgsplit(mycpv)
+ for x in self.match(mysplit[0],use_cache=0):
+ # fixed bug #41062
+ if x==mycpv:
+ continue
+ try:
+ old_counter = long(self.aux_get(x,["COUNTER"])[0])
+ writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
+ except SystemExit, e:
+ raise
+ except:
+ old_counter = 0
+ writemsg("!!! BAD COUNTER in '%s'\n" % (x))
+ if old_counter > min_counter:
+ min_counter = old_counter
+
+ # We write our new counter value to a new file that gets moved into
+ # place to avoid filesystem corruption.
+ if os.path.exists(cpath):
+ cfile=open(cpath, "r")
+ try:
+ counter=long(cfile.readline())
+ except (ValueError,OverflowError):
+ try:
+ counter=long(commands.getoutput("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'"))
+ writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter)
+ changed=1
+ except (ValueError,OverflowError):
+ writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n")
+ writemsg("!!! corrected/normalized so that portage can operate properly.\n")
+ writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
+ sys.exit(2)
+ cfile.close()
+ else:
+ try:
+ counter=long(commands.getoutput("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'"))
+ writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter)
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("!!! Initializing global counter.\n")
+ counter=long(0)
+ changed=1
+
+ if counter < min_counter:
+ counter = min_counter+1000
+ changed = 1
+
+ if incrementing or changed:
+
+ #increment counter
+ counter += 1
+ # update new global counter file
+ newcpath=cpath+".new"
+ newcfile=open(newcpath,"w")
+ newcfile.write(str(counter))
+ newcfile.close()
+ # now move global counter file into place
+ os.rename(newcpath,cpath)
+ return counter
+
+ def invalidentry(self, mypath):
+ if re.search("portage_lockfile$",mypath):
+ if not os.environ.has_key("PORTAGE_MASTER_PID"):
+ writemsg("Lockfile removed: %s\n" % mypath, 1)
+ portage_locks.unlockfile((mypath,None,None))
+ else:
+ # Nothing we can do about it. We're probably sandboxed.
+ pass
+ elif re.search(".*/-MERGING-(.*)",mypath):
+ if os.path.exists(mypath):
+ writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n")
+ else:
+ writemsg("!!! Invalid db entry: %s\n" % mypath)
+
+
+
+class fakedbapi(dbapi):
+ "This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
+ def __init__(self):
+ self.cpvdict={}
+ self.cpdict={}
+
+ def cpv_exists(self,mycpv):
+ return self.cpvdict.has_key(mycpv)
+
+ def cp_list(self,mycp,use_cache=1):
+ if not self.cpdict.has_key(mycp):
+ return []
+ else:
+ return self.cpdict[mycp]
+
+ def cp_all(self):
+ returnme=[]
+ for x in self.cpdict.keys():
+ returnme.extend(self.cpdict[x])
+ return returnme
+
+ def cpv_inject(self,mycpv):
+ """Adds a cpv to the list of available packages."""
+ mycp=cpv_getkey(mycpv)
+ self.cpvdict[mycpv]=1
+ if not self.cpdict.has_key(mycp):
+ self.cpdict[mycp]=[]
+ if not mycpv in self.cpdict[mycp]:
+ self.cpdict[mycp].append(mycpv)
+
+ #def cpv_virtual(self,oldcpv,newcpv):
+ # """Maps a cpv to the list of available packages."""
+ # mycp=cpv_getkey(newcpv)
+ # self.cpvdict[newcpv]=1
+ # if not self.virtdict.has_key(mycp):
+ # self.virtdict[mycp]=[]
+ # if not mycpv in self.virtdict[mycp]:
+ # self.virtdict[mycp].append(oldcpv)
+ # cpv_remove(oldcpv)
+
+ def cpv_remove(self,mycpv):
+ """Removes a cpv from the list of available packages."""
+ mycp=cpv_getkey(mycpv)
+ if self.cpvdict.has_key(mycpv):
+ del self.cpvdict[mycpv]
+ if not self.cpdict.has_key(mycp):
+ return
+ while mycpv in self.cpdict[mycp]:
+ del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
+ if not len(self.cpdict[mycp]):
+ del self.cpdict[mycp]
+
+class bindbapi(fakedbapi):
+ def __init__(self,mybintree=None):
+ self.bintree = mybintree
+ self.cpvdict={}
+ self.cpdict={}
+
+ def aux_get(self,mycpv,wants):
+ mysplit = string.split(mycpv,"/")
+ mylist = []
+ tbz2name = mysplit[1]+".tbz2"
+ if self.bintree and not self.bintree.isremote(mycpv):
+ tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
+ for x in wants:
+ if self.bintree and self.bintree.isremote(mycpv):
+ # We use the cache for remote packages
+ if self.bintree.remotepkgs[tbz2name].has_key(x):
+ mylist.append(self.bintree.remotepkgs[tbz2name][x][:]) # [:] Copy String
+ else:
+ mylist.append("")
+ else:
+ myval = tbz2.getfile(x)
+ if myval == None:
+ myval = ""
+ else:
+ myval = string.join(myval.split(),' ')
+ mylist.append(myval)
+
+ return mylist
+
+
+cptot=0
+class vardbapi(dbapi):
+ def __init__(self,root,categories=None):
+ self.root = root[:]
+ #cache for category directory mtimes
+ self.mtdircache = {}
+ #cache for dependency checks
+ self.matchcache = {}
+ #cache for cp_list results
+ self.cpcache = {}
+ self.blockers = None
+ self.categories = copy.deepcopy(categories)
+
+ def cpv_exists(self,mykey):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ return os.path.exists(self.root+VDB_PATH+"/"+mykey)
+
+ def cpv_counter(self,mycpv):
+ "This method will grab the COUNTER. Returns a counter value."
+ cdir=self.root+VDB_PATH+"/"+mycpv
+ cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
+
+ # We write our new counter value to a new file that gets moved into
+ # place to avoid filesystem corruption on XFS (unexpected reboot).
+ corrupted=0
+ if os.path.exists(cpath):
+ cfile=open(cpath, "r")
+ try:
+ counter=long(cfile.readline())
+ except ValueError:
+ print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
+ counter=long(0)
+ corrupted=1
+ cfile.close()
+ elif os.path.exists(cdir):
+ mys = pkgsplit(mycpv)
+ myl = self.match(mys[0],use_cache=0)
+ print mys,myl
+ if len(myl) == 1:
+ try:
+ # Only one package... Counter doesn't matter.
+ myf = open(cpath, "w")
+ myf.write("1")
+ myf.flush()
+ myf.close()
+ counter = 1
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
+ writemsg("!!! unmerge this exact version.\n")
+ writemsg("!!! %s\n" % e)
+ sys.exit(1)
+ else:
+ writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
+ writemsg("!!! remerge the package.\n")
+ sys.exit(1)
+ else:
+ counter=long(0)
+ if corrupted:
+ newcpath=cpath+".new"
+ # update new global counter file
+ newcfile=open(newcpath,"w")
+ newcfile.write(str(counter))
+ newcfile.close()
+ # now move global counter file into place
+ os.rename(newcpath,cpath)
+ return counter
+
+ def cpv_inject(self,mycpv):
+ "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
+ os.makedirs(self.root+VDB_PATH+"/"+mycpv)
+ counter=db[self.root]["vartree"].dbapi.counter_tick(self.root,mycpv=mycpv)
+ # write local package counter so that emerge clean does the right thing
+ lcfile=open(self.root+VDB_PATH+"/"+mycpv+"/COUNTER","w")
+ lcfile.write(str(counter))
+ lcfile.close()
+
+ def isInjected(self,mycpv):
+ if self.cpv_exists(mycpv):
+ if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
+ return True
+ if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
+ return True
+ return False
+
+ def move_ent(self,mylist):
+ origcp=mylist[1]
+ newcp=mylist[2]
+ origmatches=self.match(origcp,use_cache=0)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ mycpsplit=catpkgsplit(mycpv)
+ mynewcpv=newcp+"-"+mycpsplit[2]
+ mynewcat=newcp.split("/")[0]
+ if mycpsplit[3]!="r0":
+ mynewcpv += "-"+mycpsplit[3]
+ mycpsplit_new = catpkgsplit(mynewcpv)
+ origpath=self.root+VDB_PATH+"/"+mycpv
+ if not os.path.exists(origpath):
+ continue
+ writemsg("@")
+ if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
+ #create the directory
+ os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
+ newpath=self.root+VDB_PATH+"/"+mynewcpv
+ if os.path.exists(newpath):
+ #dest already exists; keep this puppy where it is.
+ continue
+ spawn(MOVE_BINARY+" "+origpath+" "+newpath,settings, free=1)
+
+ # We need to rename the ebuild now.
+ old_eb_path = newpath+"/"+mycpsplit[1] +"-"+mycpsplit[2]
+ new_eb_path = newpath+"/"+mycpsplit_new[1]+"-"+mycpsplit[2]
+ if mycpsplit[3] != "r0":
+ old_eb_path += "-"+mycpsplit[3]
+ new_eb_path += "-"+mycpsplit[3]
+ if os.path.exists(old_eb_path+".ebuild"):
+ os.rename(old_eb_path+".ebuild", new_eb_path+".ebuild")
+
+ catfile=open(newpath+"/CATEGORY", "w")
+ catfile.write(mynewcat+"\n")
+ catfile.close()
+
+ dbdir = self.root+VDB_PATH
+ for catdir in listdir(dbdir):
+ catdir = dbdir+"/"+catdir
+ if os.path.isdir(catdir):
+ for pkgdir in listdir(catdir):
+ pkgdir = catdir+"/"+pkgdir
+ if os.path.isdir(pkgdir):
+ fixdbentries(origcp, newcp, pkgdir)
+
+ def move_slot_ent(self,mylist):
+ pkg=mylist[1]
+ origslot=mylist[2]
+ newslot=mylist[3]
+
+ origmatches=self.match(pkg,use_cache=0)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ origpath=self.root+VDB_PATH+"/"+mycpv
+ if not os.path.exists(origpath):
+ continue
+
+ slot=grabfile(origpath+"/SLOT")
+ if (not slot):
+ continue
+
+ if (slot[0]!=origslot):
+ continue
+
+ writemsg("s")
+ slotfile=open(origpath+"/SLOT", "w")
+ slotfile.write(newslot+"\n")
+ slotfile.close()
+
+ def cp_list(self,mycp,use_cache=1):
+ mysplit=mycp.split("/")
+ if mysplit[0] == '*':
+ mysplit[0] = mysplit[0][1:]
+ try:
+ mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
+ except OSError:
+ mystat=0
+ if use_cache and self.cpcache.has_key(mycp):
+ cpc=self.cpcache[mycp]
+ if cpc[0]==mystat:
+ return cpc[1]
+ list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
+
+ if (list==None):
+ return []
+ returnme=[]
+ for x in list:
+ if x[0] == '-':
+ #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
+ continue
+ ps=pkgsplit(x)
+ if not ps:
+ self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
+ continue
+ if len(mysplit) > 1:
+ if ps[0]==mysplit[1]:
+ returnme.append(mysplit[0]+"/"+x)
+ if use_cache:
+ self.cpcache[mycp]=[mystat,returnme]
+ elif self.cpcache.has_key(mycp):
+ del self.cpcache[mycp]
+ return returnme
+
+ def cpv_all(self,use_cache=1):
+ returnme=[]
+ basepath = self.root+VDB_PATH+"/"
+
+ mycats = self.categories
+ if mycats == None:
+ # XXX: CIRCULAR DEP! This helps backwards compat. --NJ (10 Sept 2004)
+ mycats = settings.categories
+
+ for x in mycats:
+ for y in listdir(basepath+x,EmptyOnError=1):
+ subpath = x+"/"+y
+ # -MERGING- should never be a cpv, nor should files.
+ if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
+ returnme += [subpath]
+ return returnme
+
+ def cp_all(self,use_cache=1):
+ mylist = self.cpv_all(use_cache=use_cache)
+ d={}
+ for y in mylist:
+ if y[0] == '*':
+ y = y[1:]
+ mysplit=catpkgsplit(y)
+ if not mysplit:
+ self.invalidentry(self.root+VDB_PATH+"/"+y)
+ continue
+ d[mysplit[0]+"/"+mysplit[1]] = None
+ return d.keys()
+
+ def checkblockers(self,origdep):
+ pass
+
+ def match(self,origdep,use_cache=1):
+ "caching match function"
+ mydep=dep_expand(origdep,mydb=self,use_cache=use_cache)
+ mykey=dep_getkey(mydep)
+ mycat=mykey.split("/")[0]
+ if not use_cache:
+ if self.matchcache.has_key(mycat):
+ del self.mtdircache[mycat]
+ del self.matchcache[mycat]
+ return match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
+ try:
+ curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
+ except SystemExit, e:
+ raise
+ except:
+ curmtime=0
+
+ if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
+ # clear cache entry
+ self.mtdircache[mycat]=curmtime
+ self.matchcache[mycat]={}
+ if not self.matchcache[mycat].has_key(mydep):
+ mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
+ self.matchcache[mycat][mydep]=mymatch
+ return self.matchcache[mycat][mydep][:]
+
+ def findname(self, mycpv):
+ return self.root+VDB_PATH+"/"+str(mycpv)+"/"+mycpv.split("/")[1]+".ebuild"
+
+ def aux_get(self, mycpv, wants):
+ global auxdbkeys
+ results = []
+ if not self.cpv_exists(mycpv):
+ return []
+ for x in wants:
+ myfn = self.root+VDB_PATH+"/"+str(mycpv)+"/"+str(x)
+ if os.access(myfn,os.R_OK):
+ myf = open(myfn, "r")
+ myd = myf.read()
+ myf.close()
+ myd = re.sub("[\n\r\t]+"," ",myd)
+ myd = re.sub(" +"," ",myd)
+ myd = string.strip(myd)
+ else:
+ myd = ""
+ results.append(myd)
+ return results
+
+
+class vartree(packagetree):
+ "this tree will scan a var/db/pkg database located at root (passed to init)"
+ def __init__(self,root="/",virtual=None,clone=None,categories=None):
+ if clone:
+ self.root = clone.root[:]
+ self.dbapi = copy.deepcopy(clone.dbapi)
+ self.populated = 1
+ else:
+ self.root = root[:]
+ self.dbapi = vardbapi(self.root,categories=categories)
+ self.populated = 1
+
+ def zap(self,mycpv):
+ return
+
+ def inject(self,mycpv):
+ return
+
+ def get_provide(self,mycpv):
+ myprovides=[]
+ try:
+ mylines = grabfile(self.root+VDB_PATH+"/"+mycpv+"/PROVIDE")
+ if mylines:
+ myuse = grabfile(self.root+VDB_PATH+"/"+mycpv+"/USE")
+ myuse = string.split(string.join(myuse))
+ mylines = string.join(mylines)
+ mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
+ for myprovide in mylines:
+ mys = catpkgsplit(myprovide)
+ if not mys:
+ mys = string.split(myprovide, "/")
+ myprovides += [mys[0] + "/" + mys[1]]
+ return myprovides
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print
+ print "Check " + self.root+VDB_PATH+"/"+mycpv+"/PROVIDE and USE."
+ print "Possibly Invalid: " + str(mylines)
+ print "Exception: "+str(e)
+ print
+ return []
+
+ def get_all_provides(self):
+ myprovides = {}
+ for node in self.getallcpv():
+ for mykey in self.get_provide(node):
+ if myprovides.has_key(mykey):
+ myprovides[mykey] += [node]
+ else:
+ myprovides[mykey] = [node]
+ return myprovides
+
+ def dep_bestmatch(self,mydep,use_cache=1):
+ "compatibility method -- all matches, not just visible ones"
+ #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
+ mymatch=best(self.dbapi.match(dep_expand(mydep,mydb=self.dbapi),use_cache=use_cache))
+ if mymatch==None:
+ return ""
+ else:
+ return mymatch
+
+ def dep_match(self,mydep,use_cache=1):
+ "compatibility method -- we want to see all matches, not just visible ones"
+ #mymatch=match(mydep,self.dbapi)
+ mymatch=self.dbapi.match(mydep,use_cache=use_cache)
+ if mymatch==None:
+ return []
+ else:
+ return mymatch
+
+ def exists_specific(self,cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallcpv(self):
+ """temporary function, probably to be renamed --- Gets a list of all
+ category/package-versions installed on the system."""
+ return self.dbapi.cpv_all()
+
+ def getallnodes(self):
+ """new behavior: these are all *unmasked* nodes. There may or may not be
+ an available masked package for each node in this list."""
+ return self.dbapi.cp_all()
+
+ def exists_specific_cat(self,cpv,use_cache=1):
+ cpv=key_expand(cpv,mydb=self.dbapi,use_cache=use_cache)
+ a=catpkgsplit(cpv)
+ if not a:
+ return 0
+ mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
+ for x in mylist:
+ b=pkgsplit(x)
+ if not b:
+ self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
+ continue
+ if a[1]==b[0]:
+ return 1
+ return 0
+
+ def getebuildpath(self,fullpackage):
+ cat,package=fullpackage.split("/")
+ return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
+
+ def getnode(self,mykey,use_cache=1):
+ mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
+ if not mykey:
+ return []
+ mysplit=mykey.split("/")
+ mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
+ returnme=[]
+ for x in mydirlist:
+ mypsplit=pkgsplit(x)
+ if not mypsplit:
+ self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
+ continue
+ if mypsplit[0]==mysplit[1]:
+ appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
+ returnme.append(appendme)
+ return returnme
+
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot=string.join(grabfile(self.root+VDB_PATH+"/"+mycatpkg+"/SLOT"))
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+ return myslot
+
+ def hasnode(self,mykey,use_cache):
+ """Does the particular node (cat/pkg key) exist?"""
+ mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
+ mysplit=mykey.split("/")
+ mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
+ for x in mydirlist:
+ mypsplit=pkgsplit(x)
+ if not mypsplit:
+ self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
+ continue
+ if mypsplit[0]==mysplit[1]:
+ return 1
+ return 0
+
+ def populate(self):
+ self.populated=1
+
+# ----------------------------------------------------------------------------
+class eclass_cache:
+ """Maintains the cache information about eclasses used in ebuild."""
+ def __init__(self,porttree_root,settings):
+ self.porttree_root = porttree_root
+ self.settings = settings
+ self.depcachedir = self.settings.depcachedir[:]
+
+ self.dbmodule = self.settings.load_best_module("eclass_cache.dbmodule")
+
+ self.packages = {} # {"PV": {"eclass1": ["location", "_mtime_"]}}
+ self.eclasses = {} # {"Name": ["location","_mtime_"]}
+
+ # don't fool with porttree ordering unless you *ensure* that ebuild.sh's inherit
+ # ordering is *exactly* the same
+ self.porttrees=[self.porttree_root]
+ self.porttrees.extend(self.settings["PORTDIR_OVERLAY"].split())
+ #normalize the path now, so it's not required later.
+ self.porttrees = [os.path.normpath(x) for x in self.porttrees]
+ self.update_eclasses()
+
+ def close_caches(self):
+ for x in self.packages.keys():
+ for y in self.packages[x].keys():
+ try:
+ self.packages[x][y].sync()
+ self.packages[x][y].close()
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ writemsg("Exception when closing DB: %s: %s\n" % (Exception,e))
+ del self.packages[x][y]
+ del self.packages[x]
+
+ def flush_cache(self):
+ self.packages = {}
+ self.eclasses = {}
+ self.update_eclasses()
+
+ def update_eclasses(self):
+ self.eclasses = {}
+ for x in suffix_array(self.porttrees, "/eclass"):
+ if x and os.path.exists(x):
+ dirlist = listdir(x)
+ for y in dirlist:
+ if y[-len(".eclass"):]==".eclass":
+ try:
+ ys=y[:-len(".eclass")]
+ ymtime=os.stat(x+"/"+y)[stat.ST_MTIME]
+ except SystemExit, e:
+ raise
+ except:
+ continue
+ self.eclasses[ys] = [x, ymtime]
+
+ def setup_package(self, location, cat, pkg):
+ if not self.packages.has_key(location):
+ self.packages[location] = {}
+
+ if not self.packages[location].has_key(cat):
+ try:
+ self.packages[location][cat] = self.dbmodule(self.depcachedir+"/"+location, cat+"-eclass", [], uid, portage_gid)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("\n!!! Failed to open the dbmodule for eclass caching.\n")
+ writemsg("!!! Generally these are permission problems. Caught exception follows:\n")
+ writemsg("!!! "+str(e)+"\n")
+ writemsg("!!! Dirname: "+str(self.depcachedir+"/"+location)+"\n")
+ writemsg("!!! Basename: "+str(cat+"-eclass")+"\n\n")
+ sys.exit(123)
+
+ def sync(self, location, cat, pkg):
+ if self.packages[location].has_key(cat):
+ self.packages[location][cat].sync()
+
+ def update_package(self, location, cat, pkg, eclass_list):
+ self.setup_package(location, cat, pkg)
+ if not eclass_list:
+ return 1
+
+ data = {}
+ for x in eclass_list:
+ if x not in self.eclasses:
+ writemsg("Eclass '%s' does not exist for '%s'\n" % (x, cat+"/"+pkg))
+ return 0
+ data[x] = [self.eclasses[x][0],self.eclasses[x][1]]
+
+ self.packages[location][cat][pkg] = data
+ self.sync(location,cat,pkg)
+ return 1
+
+ def is_current(self, location, cat, pkg, eclass_list):
+ self.setup_package(location, cat, pkg)
+
+ if not eclass_list:
+ return 1
+
+ if not (self.packages[location][cat].has_key(pkg) and self.packages[location][cat][pkg] and eclass_list):
+ return 0
+
+ myp = self.packages[location][cat][pkg]
+ for x in eclass_list:
+ if not (x in self.eclasses and x in myp and myp[x] == self.eclasses[x]):
+ return 0
+
+ return 1
+
+# ----------------------------------------------------------------------------
+
+auxdbkeys=[
+ 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
+ 'PDEPEND', 'PROVIDE',
+ 'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
+ 'UNUSED_05', 'UNUSED_06', 'UNUSED_07', 'UNUSED_08',
+ ]
+auxdbkeylen=len(auxdbkeys)
+
+def close_portdbapi_caches():
+ for i in portdbapi.portdbapi_instances:
+ i.close_caches()
+class portdbapi(dbapi):
+ """this tree will scan a portage directory located at root (passed to init)"""
+ portdbapi_instances = []
+
+ def __init__(self,porttree_root,mysettings=None):
+ portdbapi.portdbapi_instances.append(self)
+ self.lock_held = 0
+
+ if mysettings:
+ self.mysettings = mysettings
+ else:
+ self.mysettings = config(clone=settings)
+
+ self.manifestVerifyLevel = None
+ self.manifestVerifier = None
+ self.manifestCache = {} # {location: [stat, md5]}
+ self.manifestMissingCache = []
+
+ if "gpg" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.EXISTS
+ if "strict" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.MARGINAL
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+ elif "severe" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.TRUSTED
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
+ else:
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+
+ #self.root=settings["PORTDIR"]
+ self.porttree_root = porttree_root
+
+ self.depcachedir = self.mysettings.depcachedir[:]
+
+ self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
+ if self.tmpfs and not os.path.exists(self.tmpfs):
+ self.tmpfs = None
+ if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
+ self.tmpfs = None
+ if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
+ self.tmpfs = None
+
+ self.eclassdb = eclass_cache(self.porttree_root, self.mysettings)
+
+ self.metadb = {}
+ self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
+
+ self.auxdb = {}
+ self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
+
+ #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
+ self.xcache={}
+ self.frozen=0
+
+ self.porttrees=[self.porttree_root]+self.mysettings["PORTDIR_OVERLAY"].split()
+
+ def close_caches(self):
+ for x in self.auxdb.keys():
+ for y in self.auxdb[x].keys():
+ self.auxdb[x][y].sync()
+ self.auxdb[x][y].close()
+ del self.auxdb[x][y]
+ del self.auxdb[x]
+ self.eclassdb.close_caches()
+
+ def flush_cache(self):
+ self.metadb = {}
+ self.auxdb = {}
+ self.eclassdb.flush_cache()
+
+ def finddigest(self,mycpv):
+ try:
+ mydig = self.findname2(mycpv)[0]
+ mydigs = string.split(mydig, "/")[:-1]
+ mydig = string.join(mydigs, "/")
+
+ mysplit = mycpv.split("/")
+ except SystemExit, e:
+ raise
+ except:
+ return ""
+ return mydig+"/files/digest-"+mysplit[-1]
+
+ def findname(self,mycpv):
+ return self.findname2(mycpv)[0]
+
+ def findname2(self,mycpv):
+ "returns file location for this particular package and in_overlay flag"
+ if not mycpv:
+ return "",0
+ mysplit=mycpv.split("/")
+
+ psplit=pkgsplit(mysplit[1])
+ ret=None
+ if psplit:
+ for x in self.porttrees:
+ # XXX Why are there errors here? XXX
+ try:
+ file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print
+ print "!!! Problem with determining the name/location of an ebuild."
+ print "!!! Please report this on IRC and bugs if you are not causing it."
+ print "!!! mycpv: ",mycpv
+ print "!!! mysplit:",mysplit
+ print "!!! psplit: ",psplit
+ print "!!! error: ",e
+ print
+ sys.exit(17)
+
+ if os.access(file, os.R_OK):
+ # when found
+ ret=[file, x]
+ if ret:
+ return ret[0], ret[1]
+
+ # when not found
+ return None, 0
+
+ def aux_get(self,mycpv,mylist,strict=0,metacachedir=None,debug=0):
+ """stub code for returning auxiliary db information, such as SLOT, DEPEND, etc.
+ input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]
+ return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error
+ """
+ global auxdbkeys,auxdbkeylen
+
+ cat,pkg = string.split(mycpv, "/", 1)
+
+ if metacachedir:
+ if cat not in self.metadb:
+ self.metadb[cat] = self.metadbmodule(metacachedir,cat,auxdbkeys,uid,portage_gid)
+
+ myebuild, mylocation=self.findname2(mycpv)
+
+ if not myebuild:
+ writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv})
+ writemsg("!!! %s\n" % myebuild)
+ raise KeyError, "'%(cpv)s' at %(path)s" % {"cpv":mycpv,"path":myebuild}
+
+ myManifestPath = string.join(myebuild.split("/")[:-1],"/")+"/Manifest"
+ if "gpg" in self.mysettings.features:
+ try:
+ mys = portage_gpg.fileStats(myManifestPath)
+ if (myManifestPath in self.manifestCache) and \
+ (self.manifestCache[myManifestPath] == mys):
+ pass
+ elif self.manifestVerifier:
+ if not self.manifestVerifier.verify(myManifestPath):
+ # Verification failed the desired level.
+ raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
+
+ if ("severe" in self.mysettings.features) and \
+ (mys != portage_gpg.fileStats(myManifestPath)):
+ raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
+
+ except portage_exception.InvalidSignature, e:
+ if ("strict" in self.mysettings.features) or \
+ ("severe" in self.mysettings.features):
+ raise
+ writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
+ except portage_exception.MissingSignature, e:
+ if ("severe" in self.mysettings.features):
+ raise
+ if ("strict" in self.mysettings.features):
+ if myManifestPath not in self.manifestMissingCache:
+ writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
+ self.manifestMissingCache.insert(0,myManifestPath)
+ except (OSError,portage_exception.FileNotFound), e:
+ if ("strict" in self.mysettings.features) or \
+ ("severe" in self.mysettings.features):
+ raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
+ writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath})
+
+ if mylocation not in self.auxdb:
+ self.auxdb[mylocation] = {}
+
+ if not self.auxdb[mylocation].has_key(cat):
+ self.auxdb[mylocation][cat] = self.auxdbmodule(self.depcachedir+"/"+mylocation,cat,auxdbkeys,uid,portage_gid)
+
+ if os.access(myebuild, os.R_OK):
+ emtime=os.stat(myebuild)[stat.ST_MTIME]
+ else:
+ writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv})
+ writemsg("!!! %s\n" % myebuild)
+ raise KeyError
+
+ try:
+ auxdb_is_valid = self.auxdb[mylocation][cat].has_key(pkg) and \
+ self.auxdb[mylocation][cat][pkg].has_key("_mtime_") and \
+ self.auxdb[mylocation][cat][pkg]["_mtime_"] == emtime
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ auxdb_is_valid = 0
+ writemsg("auxdb exception: [%(loc)s]: %(exception)s\n" % {"loc":mylocation+"::"+cat+"/"+pkg, "exception":str(e)})
+ if self.auxdb[mylocation][cat].has_key(pkg):
+ self.auxdb[mylocation][cat].del_key(pkg)
+ self.auxdb[mylocation][cat].sync()
+
+ writemsg("auxdb is valid: "+str(auxdb_is_valid)+" "+str(pkg)+"\n", 2)
+ doregen = not (auxdb_is_valid and self.eclassdb.is_current(mylocation,cat,pkg,self.auxdb[mylocation][cat][pkg]["INHERITED"].split()))
+
+ # when mylocation is not an overlay directory and metacachedir is set,
+ # we use the cache files, which usually live under /usr/portage/metadata/cache/.
+ if doregen and mylocation==self.mysettings["PORTDIR"] and metacachedir and self.metadb[cat].has_key(pkg):
+ metadata=self.metadb[cat][pkg]
+ self.eclassdb.update_package(mylocation,cat,pkg,metadata["INHERITED"].split())
+ self.auxdb[mylocation][cat][pkg] = metadata
+ self.auxdb[mylocation][cat].sync()
+ elif doregen:
+
+ writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
+ writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
+
+ if self.tmpfs:
+ mydbkey = self.tmpfs+"/aux_db_key_temp"
+ else:
+ mydbkey = self.depcachedir+"/aux_db_key_temp"
+
+ # XXX: Part of the gvisible hack/fix to prevent deadlock
+ # XXX: through doebuild. Need to isolate this somehow...
+ self.mysettings.reset()
+
+ if self.lock_held:
+ raise "Lock is already held by me?"
+ self.lock_held = 1
+ mylock = portage_locks.lockfile(mydbkey, wantnewlockfile=1)
+
+ if os.path.exists(mydbkey):
+ try:
+ os.unlink(mydbkey)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ portage_locks.unlockfile(mylock)
+ self.lock_held = 0
+ writemsg("Uncaught handled exception: %(exception)s\n" % {"exception":str(e)})
+ raise
+
+ myret=doebuild(myebuild,"depend","/",self.mysettings,dbkey=mydbkey)
+ if myret:
+ portage_locks.unlockfile(mylock)
+ self.lock_held = 0
+ #depend returned non-zero exit code...
+ writemsg(str(red("\naux_get():")+" (0) Error in "+mycpv+" ebuild. ("+str(myret)+")\n"
+ " Check for syntax error or corruption in the ebuild. (--debug)\n\n"))
+ raise KeyError
+
+ try:
+ mycent=open(mydbkey,"r")
+ os.unlink(mydbkey)
+ mylines=mycent.readlines()
+ mycent.close()
+ except SystemExit, e:
+ raise
+ except (IOError, OSError):
+ portage_locks.unlockfile(mylock)
+ self.lock_held = 0
+ writemsg(str(red("\naux_get():")+" (1) Error in "+mycpv+" ebuild.\n"
+ " Check for syntax error or corruption in the ebuild. (--debug)\n\n"))
+ raise KeyError
+ except Exception, e:
+ portage_locks.unlockfile(mylock)
+ self.lock_held = 0
+ writemsg("Uncaught handled exception: %(exception)s\n" % {"exception":str(e)})
+ raise
+
+ portage_locks.unlockfile(mylock)
+ self.lock_held = 0
+
+ mydata = {}
+ for x in range(0,len(mylines)):
+ if mylines[x][-1] == '\n':
+ mylines[x] = mylines[x][:-1]
+ mydata[auxdbkeys[x]] = mylines[x]
+ mydata["_mtime_"] = emtime
+
+ self.auxdb[mylocation][cat][pkg] = mydata
+ self.auxdb[mylocation][cat].sync()
+ if not self.eclassdb.update_package(mylocation, cat, pkg, mylines[auxdbkeys.index("INHERITED")].split()):
+ sys.exit(1)
+
+ #finally, we look at our internal cache entry and return the requested data.
+ mydata = self.auxdb[mylocation][cat][pkg]
+ returnme = []
+ for x in mylist:
+ if mydata.has_key(x):
+ returnme.append(mydata[x])
+ else:
+ returnme.append("")
+
+ return returnme
+
+ def getfetchlist(self,mypkg,useflags=None,mysettings=None,all=0):
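+ # Returns [uri_list, file_list] for mypkg's SRC_URI after USE conditionals
+ # are reduced; e.g. (illustrative) a distfile named foo-1.0.tar.gz appears
+ # only once in file_list even if it is listed under several mirror URIs.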
+ if mysettings == None:
+ mysettings = self.mysettings
+ try:
+ myuris = self.aux_get(mypkg,["SRC_URI"])[0]
+ except (IOError,KeyError):
+ print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
+ sys.exit(1)
+
+ if useflags is None:
+ useflags = string.split(mysettings["USE"])
+
+ myurilist = portage_dep.paren_reduce(myuris)
+ myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
+ newuris = flatten(myurilist)
+
+ myfiles = []
+ for x in newuris:
+ mya = os.path.basename(x)
+ if not mya in myfiles:
+ myfiles.append(mya)
+ return [newuris, myfiles]
+
+ def getfetchsizes(self,mypkg,useflags=None,debug=0):
+ # returns a filename:size dictionary of remaining downloads
+ mydigest=self.finddigest(mypkg)
+ mymd5s=digestParseFile(mydigest)
+ if not mymd5s:
+ if debug: print "[empty/missing/bad digest]: "+mypkg
+ return None
+ filesdict={}
+ if useflags == None:
+ myuris, myfiles = self.getfetchlist(mypkg,all=1)
+ else:
+ myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
+ #XXX: maybe this should be improved: take partial downloads
+ # into account? check md5sums?
+ for myfile in myfiles:
+ if debug and myfile not in mymd5s.keys():
+ print "[bad digest]: missing",myfile,"for",mypkg
+ elif myfile in mymd5s.keys():
+ distfile=settings["DISTDIR"]+"/"+myfile
+ if not os.access(distfile, os.R_OK):
+ filesdict[myfile]=int(mymd5s[myfile]["size"])
+ return filesdict
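+ # Illustrative use (hypothetical cpv): getfetchsizes("sys-apps/foo-1.0")
+ # might return {"foo-1.0.tar.gz": 1048576} when that distfile is listed
+ # in the digest but not yet present (readable) in DISTDIR.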
+
+ def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
+ if not useflags:
+ if mysettings:
+ useflags = mysettings["USE"].split()
+ myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
+ mydigest = self.finddigest(mypkg)
+ mysums = digestParseFile(mydigest)
+
+ failures = {}
+ for x in myfiles:
+ if not mysums or x not in mysums:
+ ok = False
+ reason = "digest missing"
+ else:
+ ok,reason = portage_checksum.verify_all(self.mysettings["DISTDIR"]+"/"+x, mysums[x])
+ if not ok:
+ failures[x] = reason
+ if failures:
+ return False
+ return True
+
+ def getsize(self,mypkg,useflags=None,debug=0):
+ # returns the total size of remaining downloads
+ #
+ # we use getfetchsizes() now, so this function is obsolete
+ #
+ filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
+ if filesdict==None:
+ return "[empty/missing/bad digest]"
+ mysum=0
+ for myfile in filesdict.keys():
+ mysum+=filesdict[myfile]
+ return mysum
+
+ def cpv_exists(self,mykey):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ cps2=mykey.split("/")
+ cps=catpkgsplit(mykey,silent=0)
+ if not cps:
+ #invalid cat/pkg-v
+ return 0
+ if self.findname(cps[0]+"/"+cps2[1]):
+ return 1
+ else:
+ return 0
+
+ def cp_all(self):
+ "returns a list of all keys in our tree"
+ d={}
+ for x in self.mysettings.categories:
+ for oroot in self.porttrees:
+ for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1):
+ mykey=x+"/"+y
+ d[x+"/"+y] = None
+ l = d.keys()
+ l.sort()
+ return l
+
+ def p_list(self,mycp):
+ d={}
+ for oroot in self.porttrees:
+ for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
+ if x[-7:]==".ebuild":
+ d[x[:-7]] = None
+ return d.keys()
+
+ def cp_list(self,mycp,use_cache=1):
+ mysplit=mycp.split("/")
+ d={}
+ for oroot in self.porttrees:
+ for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
+ if x[-7:]==".ebuild":
+ d[mysplit[0]+"/"+x[:-7]] = None
+ return d.keys()
+
+ def freeze(self):
+ for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
+ self.xcache[x]={}
+ self.frozen=1
+
+ def melt(self):
+ self.xcache={}
+ self.frozen=0
+
+ def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
+ "caching match function; very trick stuff"
+ #if no updates are being made to the tree, we can consult our xcache...
+ if self.frozen:
+ try:
+ return self.xcache[level][origdep]
+ except KeyError:
+ pass
+
+ if not mydep:
+ #this stuff only runs on first call of xmatch()
+ #create mydep, mykey from origdep
+ mydep=dep_expand(origdep,mydb=self)
+ mykey=dep_getkey(mydep)
+
+ if level=="list-visible":
+ #a list of all visible packages, not called directly (just by xmatch())
+ #myval=self.visible(self.cp_list(mykey))
+ myval=self.gvisible(self.visible(self.cp_list(mykey)))
+ elif level=="bestmatch-visible":
+ #dep match -- best match of all visible packages
+ myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
+ #get all visible matches (from xmatch()), then choose the best one
+ elif level=="bestmatch-list":
+ #dep match -- find best match but restrict search to sublist
+ myval=best(match_from_list(mydep,mylist))
+ #no point in calling xmatch again since we're not caching list deps
+ elif level=="match-list":
+ #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
+ myval=match_from_list(mydep,mylist)
+ elif level=="match-visible":
+ #dep match -- find all visible matches
+ myval=match_from_list(mydep,self.xmatch("list-visible",None,mydep=mydep,mykey=mykey))
+ #get all visible packages, then get the matching ones
+ elif level=="match-all":
+ #match *all* visible *and* masked packages
+ myval=match_from_list(mydep,self.cp_list(mykey))
+ else:
+ print "ERROR: xmatch doesn't handle",level,"query!"
+ raise KeyError
+ if self.frozen and (level not in ["match-list","bestmatch-list"]):
+ #cache under both keys: the lookup above uses origdep, recursion uses mydep
+ self.xcache[level][mydep]=myval
+ if origdep and origdep != mydep:
+ self.xcache[level][origdep]=myval
+ return myval
+
+ def match(self,mydep,use_cache=1):
+ return self.xmatch("match-visible",mydep)
+
+ def visible(self,mylist):
+ """two functions in one. Accepts a list of cpv values and uses the package.mask *and*
+ packages file to remove invisible entries, returning remaining items. This function assumes
+ that all entries in mylist have the same category and package name."""
+ if (mylist==None) or (len(mylist)==0):
+ return []
+ newlist=mylist[:]
+ #first, we mask out packages in the package.mask file
+ mykey=newlist[0]
+ cpv=catpkgsplit(mykey)
+ if not cpv:
+ #invalid cat/pkg-v
+ print "visible(): invalid cat/pkg-v:",mykey
+ return []
+ mycp=cpv[0]+"/"+cpv[1]
+ maskdict=self.mysettings.pmaskdict
+ unmaskdict=self.mysettings.punmaskdict
+ if maskdict.has_key(mycp):
+ for x in maskdict[mycp]:
+ mymatches=self.xmatch("match-all",x)
+ if mymatches==None:
+ #error in package.mask file; print warning and continue:
+ print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
+ continue
+ for y in mymatches:
+ unmask=0
+ if unmaskdict.has_key(mycp):
+ for z in unmaskdict[mycp]:
+ mymatches_unmask=self.xmatch("match-all",z)
+ if y in mymatches_unmask:
+ unmask=1
+ break
+ if unmask==0:
+ try:
+ newlist.remove(y)
+ except ValueError:
+ pass
+
+ revmaskdict=self.mysettings.prevmaskdict
+ if revmaskdict.has_key(mycp):
+ for x in revmaskdict[mycp]:
+ #important: only match against the still-unmasked entries...
+ #notice how we pass "newlist" to the xmatch() call below....
+ #Without this, ~ deps in the packages files are broken.
+ mymatches=self.xmatch("match-list",x,mylist=newlist)
+ if mymatches==None:
+ #error in packages file; print warning and continue:
+ print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
+ continue
+ pos=0
+ while pos<len(newlist):
+ if newlist[pos] not in mymatches:
+ del newlist[pos]
+ else:
+ pos += 1
+ return newlist
+
+ def gvisible(self,mylist):
+ "strip out group-masked (not in current group) entries"
+ global groups
+ if mylist==None:
+ return []
+ newlist=[]
+
+ pkgdict = self.mysettings.pkeywordsdict
+ for mycpv in mylist:
+ #we need to update this next line when we have fully integrated the new db api
+ auxerr=0
+ try:
+ myaux=db["/"]["porttree"].dbapi.aux_get(mycpv, ["KEYWORDS"])
+ except (KeyError,IOError,TypeError):
+ continue
+ if not myaux[0]:
+ # KEYWORDS=""
+ #print "!!! No KEYWORDS for "+str(mycpv)+" -- Untested Status"
+ continue
+ mygroups=myaux[0].split()
+ pgroups=groups[:]
+ match=0
+ cp = dep_getkey(mycpv)
+ if pkgdict.has_key(cp):
+ matches = match_to_list(mycpv, pkgdict[cp].keys())
+ for atom in matches:
+ pgroups.extend(pkgdict[cp][atom])
+ hasstable = False
+ hastesting = False
+ for gp in mygroups:
+ if gp=="*":
+ writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv)
+ match=1
+ break
+ elif "-"+gp in pgroups:
+ match=0
+ break
+ elif gp in pgroups:
+ match=1
+ break
+ elif gp[0] == "~":
+ hastesting = True
+ elif gp[0] != "-":
+ hasstable = True
+ if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups)):
+ match=1
+ if match:
+ newlist.append(mycpv)
+ return newlist
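+ # Illustrative (hypothetical values): with KEYWORDS="~x86 amd64" and
+ # pgroups=["x86"], the cpv is dropped; a package.keywords entry adding
+ # "~x86" extends pgroups so the ~x86 keyword matches and the cpv survives.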
+
+class binarytree(packagetree):
+ "this tree scans for a list of all packages available in PKGDIR"
+ def __init__(self,root,pkgdir,virtual=None,clone=None):
+
+ if clone:
+ # XXX This isn't cloning. It's an instance of the same thing.
+ self.root=clone.root
+ self.pkgdir=clone.pkgdir
+ self.dbapi=clone.dbapi
+ self.populated=clone.populated
+ self.tree=clone.tree
+ self.remotepkgs=clone.remotepkgs
+ self.invalids=clone.invalids
+ else:
+ self.root=root
+ #self.pkgdir=settings["PKGDIR"]
+ self.pkgdir=pkgdir
+ self.dbapi=bindbapi(self)
+ self.populated=0
+ self.tree={}
+ self.remotepkgs={}
+ self.invalids=[]
+
+ def move_ent(self,mylist):
+ if not self.populated:
+ self.populate()
+ origcp=mylist[1]
+ newcp=mylist[2]
+ mynewcat=newcp.split("/")[0]
+ origmatches=self.dbapi.cp_list(origcp)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+
+ mycpsplit=catpkgsplit(mycpv)
+ mynewcpv=newcp+"-"+mycpsplit[2]
+ if mycpsplit[3]!="r0":
+ mynewcpv += "-"+mycpsplit[3]
+ myoldpkg=mycpv.split("/")[1]
+ mynewpkg=mynewcpv.split("/")[1]
+
+ if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
+ writemsg("!!! Cannot update binary: Destination exists.\n")
+ writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n")
+ continue
+
+ tbz2path=self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
+ continue
+
+ #print ">>> Updating data in:",mycpv
+ sys.stdout.write("%")
+ sys.stdout.flush()
+ mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
+ mytbz2=xpak.tbz2(tbz2path)
+ mytbz2.decompose(mytmpdir, cleanup=1)
+
+ fixdbentries(origcp, newcp, mytmpdir)
+
+ catfile=open(mytmpdir+"/CATEGORY", "w")
+ catfile.write(mynewcat+"\n")
+ catfile.close()
+ try:
+ os.rename(mytmpdir+"/"+string.split(mycpv,"/")[1]+".ebuild", mytmpdir+"/"+string.split(mynewcpv, "/")[1]+".ebuild")
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+
+ mytbz2.recompose(mytmpdir, cleanup=1)
+
+ self.dbapi.cpv_remove(mycpv)
+ if (mynewpkg != myoldpkg):
+ os.rename(tbz2path,self.getname(mynewcpv))
+ self.dbapi.cpv_inject(mynewcpv)
+ return 1
+
+ def move_slot_ent(self,mylist,mytmpdir):
+ #mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
+ mytmpdir=mytmpdir+"/tbz2"
+ if not self.populated:
+ self.populate()
+ pkg=mylist[1]
+ origslot=mylist[2]
+ newslot=mylist[3]
+ origmatches=self.dbapi.match(pkg)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ mycpsplit=catpkgsplit(mycpv)
+ myoldpkg=mycpv.split("/")[1]
+ tbz2path=self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
+ continue
+
+ #print ">>> Updating data in:",mycpv
+ mytbz2=xpak.tbz2(tbz2path)
+ mytbz2.decompose(mytmpdir, cleanup=1)
+
+ slot=grabfile(mytmpdir+"/SLOT")
+ if (not slot):
+ continue
+
+ if (slot[0]!=origslot):
+ continue
+
+ sys.stdout.write("S")
+ sys.stdout.flush()
+
+ slotfile=open(mytmpdir+"/SLOT", "w")
+ slotfile.write(newslot+"\n")
+ slotfile.close()
+ mytbz2.recompose(mytmpdir, cleanup=1)
+ return 1
+
+ def update_ents(self,mybiglist,mytmpdir):
+ #XXX mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
+ if not self.populated:
+ self.populate()
+ for mycpv in self.dbapi.cp_all():
+ tbz2path=self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
+ continue
+ #print ">>> Updating binary data:",mycpv
+ writemsg("*")
+ mytbz2=xpak.tbz2(tbz2path)
+ mytbz2.decompose(mytmpdir,cleanup=1)
+ for mylist in mybiglist:
+ mylist=string.split(mylist)
+ if mylist[0] != "move":
+ continue
+ fixdbentries(mylist[1], mylist[2], mytmpdir)
+ mytbz2.recompose(mytmpdir,cleanup=1)
+ return 1
+
+ def populate(self, getbinpkgs=0,getbinpkgsonly=0):
+ "populates the binarytree"
+ if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
+ return 0
+ if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
+ return 0
+
+ if (not getbinpkgsonly) and os.path.exists(self.pkgdir+"/All"):
+ for mypkg in listdir(self.pkgdir+"/All"):
+ if mypkg[-5:]!=".tbz2":
+ continue
+ mytbz2=xpak.tbz2(self.pkgdir+"/All/"+mypkg)
+ mycat=mytbz2.getfile("CATEGORY")
+ if not mycat:
+ #old-style or corrupt package
+ writemsg("!!! Invalid binary package: "+mypkg+"\n")
+ self.invalids.append(mypkg)
+ continue
+ mycat=string.strip(mycat)
+ fullpkg=mycat+"/"+mypkg[:-5]
+ mykey=dep_getkey(fullpkg)
+ try:
+ # invalid tbz2's can hurt things.
+ self.dbapi.cpv_inject(fullpkg)
+ except SystemExit, e:
+ raise
+ except:
+ continue
+
+ if getbinpkgs and not settings["PORTAGE_BINHOST"]:
+ writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"))
+
+ if getbinpkgs and settings["PORTAGE_BINHOST"] and not self.remotepkgs:
+ try:
+ chunk_size = long(settings["PORTAGE_BINHOST_CHUNKSIZE"])
+ if chunk_size < 8:
+ chunk_size = 8
+ except SystemExit, e:
+ raise
+ except:
+ chunk_size = 3000
+
+ writemsg(green("Fetching binary packages info...\n"))
+ self.remotepkgs = getbinpkg.dir_get_metadata(settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
+ writemsg(green(" -- DONE!\n\n"))
+
+ for mypkg in self.remotepkgs.keys():
+ if not self.remotepkgs[mypkg].has_key("CATEGORY"):
+ #old-style or corrupt package
+ writemsg("!!! Invalid remote binary package: "+mypkg+"\n")
+ del self.remotepkgs[mypkg]
+ continue
+ mycat=string.strip(self.remotepkgs[mypkg]["CATEGORY"])
+ fullpkg=mycat+"/"+mypkg[:-5]
+ mykey=dep_getkey(fullpkg)
+ try:
+ # invalid tbz2's can hurt things.
+ #print "cpv_inject("+str(fullpkg)+")"
+ self.dbapi.cpv_inject(fullpkg)
+ #print " -- Injected"
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n")
+ del self.remotepkgs[mypkg]
+ continue
+ self.populated=1
+
+ def inject(self,cpv):
+ return self.dbapi.cpv_inject(cpv)
+
+ def exists_specific(self,cpv):
+ if not self.populated:
+ self.populate()
+ return self.dbapi.match(dep_expand("="+cpv,mydb=self.dbapi))
+
+ def dep_bestmatch(self,mydep):
+ "compatibility method -- all matches, not just visible ones"
+ if not self.populated:
+ self.populate()
+ writemsg("\n\n", 1)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mydep=dep_expand(mydep,mydb=self.dbapi)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mykey=dep_getkey(mydep)
+ writemsg("mykey: %s\n" % mykey, 1)
+ mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
+ writemsg("mymatch: %s\n" % mymatch, 1)
+ if mymatch==None:
+ return ""
+ return mymatch
+
+ def getname(self,pkgname):
+ "returns file location for this particular package"
+ mysplit=string.split(pkgname,"/")
+ if len(mysplit)==1:
+ return self.pkgdir+"/All/"+self.resolve_specific(pkgname)+".tbz2"
+ else:
+ return self.pkgdir+"/All/"+mysplit[1]+".tbz2"
+
+ def isremote(self,pkgname):
+ "Returns true if the package is kept remotely."
+ mysplit=string.split(pkgname,"/")
+ remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
+ return remote
+
+ def get_use(self,pkgname):
+ mysplit=string.split(pkgname,"/")
+ if self.isremote(pkgname):
+ return string.split(self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:])
+ tbz2=xpak.tbz2(self.getname(pkgname))
+ return string.split(tbz2.getfile("USE"))
+
+ def gettbz2(self,pkgname):
+ "fetches the package from a remote site, if necessary."
+ print "Fetching '"+str(pkgname)+"'"
+ mysplit = string.split(pkgname,"/")
+ tbz2name = mysplit[1]+".tbz2"
+ if not self.isremote(pkgname):
+ if (tbz2name not in self.invalids):
+ return
+ else:
+ writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n")
+ mydest = self.pkgdir+"/All/"
+ try:
+ os.makedirs(mydest, 0775)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ getbinpkg.file_get(settings["PORTAGE_BINHOST"]+"/"+tbz2name, mydest, fcmd=settings["RESUMECOMMAND"])
+ return
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+ return myslot
+
+class dblink:
+ "this class provides an interface to the standard text package database"
+ def __init__(self,cat,pkg,myroot,mysettings):
+ "create a dblink object for cat/pkg. This dblink entry may or may not exist"
+ self.cat = cat
+ self.pkg = pkg
+ self.mycpv = self.cat+"/"+self.pkg
+ self.mysplit = pkgsplit(self.mycpv)
+
+ self.dbroot = os.path.normpath(myroot+VDB_PATH)
+ self.dbcatdir = self.dbroot+"/"+cat
+ self.dbpkgdir = self.dbcatdir+"/"+pkg
+ self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
+ self.dbdir = self.dbpkgdir
+
+ self.lock_pkg = None
+ self.lock_tmp = None
+ self.lock_num = 0 # Count of the held locks on the db.
+
+ self.settings = mysettings
+ if self.settings==1:
+ raise ValueError
+
+ self.myroot=myroot
+ self.updateprotect()
+ self.contentscache=[]
+
+ def lockdb(self):
+ if self.lock_num == 0:
+ self.lock_pkg = portage_locks.lockdir(self.dbpkgdir)
+ self.lock_tmp = portage_locks.lockdir(self.dbtmpdir)
+ self.lock_num += 1
+
+ def unlockdb(self):
+ self.lock_num -= 1
+ if self.lock_num == 0:
+ portage_locks.unlockdir(self.lock_tmp)
+ portage_locks.unlockdir(self.lock_pkg)
+
+ def getpath(self):
+ "return path to location of db information (for >>> informational display)"
+ return self.dbdir
+
+ def exists(self):
+ "does the db entry exist? boolean."
+ return os.path.exists(self.dbdir)
+
+ def create(self):
+ "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
+ # XXXXX Delete this eventually
+ raise Exception, "This is bad. Don't use it."
+ if not os.path.exists(self.dbdir):
+ os.makedirs(self.dbdir)
+
+ def delete(self):
+ "erase this db entry completely"
+ if not os.path.exists(self.dbdir):
+ return
+ try:
+ for x in listdir(self.dbdir):
+ os.unlink(self.dbdir+"/"+x)
+ os.rmdir(self.dbdir)
+ except OSError, e:
+ print "!!! Unable to remove db entry for this package."
+ print "!!! It is possible that a directory is in this one. Portage will still"
+ print "!!! register this package as installed as long as this directory exists."
+ print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
+ print "!!! "+str(e)
+ print
+ sys.exit(1)
+
+ def clearcontents(self):
+ if os.path.exists(self.dbdir+"/CONTENTS"):
+ os.unlink(self.dbdir+"/CONTENTS")
+
+ def getcontents(self):
+ if not os.path.exists(self.dbdir+"/CONTENTS"):
+ return None
+ if self.contentscache != []:
+ return self.contentscache
+ pkgfiles={}
+ myc=open(self.dbdir+"/CONTENTS","r")
+ mylines=myc.readlines()
+ myc.close()
+ pos=1
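+ # Each CONTENTS line is e.g. (illustrative):
+ #   obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1117485471
+ #   sym /usr/lib/libfoo.so -> libfoo.so.1 1117485471
+ #   dir /usr/share/doc/foo-1.0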
+ for line in mylines:
+ mydat = string.split(line)
+ # we do this so we can remove from non-root filesystems
+ # (use the ROOT var to allow maintenance on other partitions)
+ try:
+ mydat[1]=os.path.normpath(root+mydat[1][1:])
+ if mydat[0]=="obj":
+ #format: type, mtime, md5sum
+ pkgfiles[string.join(mydat[1:-2]," ")]=[mydat[0], mydat[-1], mydat[-2]]
+ elif mydat[0]=="dir":
+ #format: type
+ pkgfiles[string.join(mydat[1:])]=[mydat[0] ]
+ elif mydat[0]=="sym":
+ #format: type, mtime, dest
+ x=len(mydat)-1
+ if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
+ mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
+ writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
+ x=len(mydat)-1
+ splitter=-1
+ while(x>=0):
+ if mydat[x]=="->":
+ splitter=x
+ break
+ x=x-1
+ if splitter==-1:
+ return None
+ pkgfiles[string.join(mydat[1:splitter]," ")]=[mydat[0], mydat[-1], string.join(mydat[(splitter+1):-1]," ")]
+ elif mydat[0]=="dev":
+ #format: type
+ pkgfiles[string.join(mydat[1:]," ")]=[mydat[0] ]
+ elif mydat[0]=="fif":
+ #format: type
+ pkgfiles[string.join(mydat[1:]," ")]=[mydat[0]]
+ else:
+ return None
+ except (KeyError,IndexError):
+ print "portage: CONTENTS line",pos,"corrupt!"
+ pos += 1
+ self.contentscache=pkgfiles
+ return pkgfiles
+
+ def updateprotect(self):
+ #do some config file management prep
+ self.protect=[]
+ for x in string.split(self.settings["CONFIG_PROTECT"]):
+ ppath=normalize_path(self.myroot+x)+"/"
+ if os.path.isdir(ppath):
+ self.protect.append(ppath)
+
+ self.protectmask=[]
+ for x in string.split(self.settings["CONFIG_PROTECT_MASK"]):
+ ppath=normalize_path(self.myroot+x)+"/"
+ if os.path.isdir(ppath):
+ self.protectmask.append(ppath)
+ #if it doesn't exist, silently skip it
+
+ def isprotected(self,obj):
+ """Checks if obj is in the current protect/mask directories. Returns
+ 0 on unprotected/masked, and 1 on protected."""
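+ # Illustrative (hypothetical values): with protect=["/etc/"] and
+ # protectmask=["/etc/env.d/"], "/etc/fstab" gives protected=5, masked=0
+ # -> 1, while "/etc/env.d/00basic" gives protected=5, masked=11 -> 0;
+ # the longest matching mask prefix beats a shorter protect prefix.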
+ masked=0
+ protected=0
+ for ppath in self.protect:
+ if (len(ppath) > masked) and (obj[0:len(ppath)]==ppath):
+ protected=len(ppath)
+ #config file management
+ for pmpath in self.protectmask:
+ if (len(pmpath) >= protected) and (obj[0:len(pmpath)]==pmpath):
+ #skip, it's in the mask
+ masked=len(pmpath)
+ return (protected > masked)
+
+ def unmerge(self,pkgfiles=None,trimworld=1,cleanup=0):
+ global dircache
+ dircache={}
+
+ self.lockdb()
+
+ self.settings.load_infodir(self.dbdir)
+
+ if not pkgfiles:
+ print "No package files given... Grabbing a set."
+ pkgfiles=self.getcontents()
+
+ # Now, don't assume that the name of the ebuild is the same as the
+ # name of the dir; the package may have been moved.
+ myebuildpath=None
+
+ # We should use the environment file if possible,
+ # as it has all sourced files already included.
+ # XXX: Need to ensure it doesn't overwrite any important vars though.
+ if os.access(self.dbdir+"/environment.bz2", os.R_OK):
+ spawn("bzip2 -d "+self.dbdir+"/environment.bz2",self.settings,free=1)
+
+ if not myebuildpath:
+ mystuff=listdir(self.dbdir,EmptyOnError=1)
+ for x in mystuff:
+ if x[-7:]==".ebuild":
+ myebuildpath=self.dbdir+"/"+x
+ break
+
+ #do prerm script
+ if myebuildpath and os.path.exists(myebuildpath):
+ a=doebuild(myebuildpath,"prerm",self.myroot,self.settings,cleanup=cleanup,use_cache=0,tree="vartree")
+ # XXX: Decide how to handle failures here.
+ if a != 0:
+ writemsg("!!! FAILED prerm: "+str(a)+"\n")
+ sys.exit(123)
+
+ if pkgfiles:
+ mykeys=pkgfiles.keys()
+ mykeys.sort()
+ mykeys.reverse()
+
+ self.updateprotect()
+
+ #process symlinks second-to-last, directories last.
+ mydirs=[]
+ mysyms=[]
+ modprotect="/lib/modules/"
+ for obj in mykeys:
+ obj=os.path.normpath(obj)
+ if obj[:2]=="//":
+ obj=obj[1:]
+ if not os.path.exists(obj):
+ if not os.path.islink(obj):
+ #we skip this if we're dealing with a symlink
+ #because os.path.exists() will operate on the
+ #link target rather than the link itself.
+ print "--- !found "+str(pkgfiles[obj][0]), obj
+ continue
+ # next line includes a tweak to protect modules from being unmerged,
+ # but we don't protect modules from being overwritten if they are
+ # upgraded. We effectively only want one half of the config protection
+ # functionality for /lib/modules. For portage-ng both capabilities
+ # should be able to be independently specified.
+ if self.isprotected(obj) or ((len(obj) > len(modprotect)) and (obj[0:len(modprotect)]==modprotect)):
+ print "--- cfgpro "+str(pkgfiles[obj][0]), obj
+ continue
+
+ lstatobj=os.lstat(obj)
+ lmtime=str(lstatobj[stat.ST_MTIME])
+ if (pkgfiles[obj][0] not in ("dir","fif","dev","sym")) and (lmtime != pkgfiles[obj][1]):
+ print "--- !mtime", pkgfiles[obj][0], obj
+ continue
+
+ if pkgfiles[obj][0]=="dir":
+ if not os.path.isdir(obj):
+ print "--- !dir ","dir", obj
+ continue
+ mydirs.append(obj)
+ elif pkgfiles[obj][0]=="sym":
+ if not os.path.islink(obj):
+ print "--- !sym ","sym", obj
+ continue
+ mysyms.append(obj)
+ elif pkgfiles[obj][0]=="obj":
+ if not os.path.isfile(obj):
+ print "--- !obj ","obj", obj
+ continue
+ mymd5=portage_checksum.perform_md5(obj, calc_prelink=1)
+
+ # string.lower is needed because db entries used to be in upper-case. The
+ # string.lower allows for backwards compatibility.
+ if mymd5 != string.lower(pkgfiles[obj][2]):
+ print "--- !md5 ","obj", obj
+ continue
+ try:
+ os.unlink(obj)
+ except (OSError,IOError),e:
+ pass
+ print "<<< ","obj",obj
+ elif pkgfiles[obj][0]=="fif":
+ if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
+ print "--- !fif ","fif", obj
+ continue
+ try:
+ os.unlink(obj)
+ except (OSError,IOError),e:
+ pass
+ print "<<< ","fif",obj
+ elif pkgfiles[obj][0]=="dev":
+ print "--- ","dev",obj
+
+ #Now, we need to remove symlinks and directories. We'll repeatedly
+ #remove dead symlinks, then directories until we stop making progress.
+ #This is how we'll clean up directories containing symlinks pointing to
+ #directories that are now empty. These cases will require several
+ #iterations through our two-stage symlink/directory cleaning loop.
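+ # e.g. (illustrative): a symlink pointing into a directory that only
+ # becomes empty during the first directory pass is still alive on pass
+ # one; the directory is rmdir'd, the symlink goes dead, and it is then
+ # unlinked on the next iteration of the outer loop.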
+
+ #main symlink and directory removal loop:
+
+ #progress -- are we making progress? Initialized to 1 so loop will start
+ progress=1
+ while progress:
+ #let's see if we're able to make progress this iteration...
+ progress=0
+
+ #step 1: remove all the dead symlinks we can...
+
+ pos = 0
+ while pos<len(mysyms):
+ obj=mysyms[pos]
+ if os.path.exists(obj):
+ pos += 1
+ else:
+ #we have a dead symlink; remove it from our list, then from existence
+ del mysyms[pos]
+ #we've made progress!
+ progress = 1
+ try:
+ os.unlink(obj)
+ print "<<< ","sym",obj
+ except (OSError,IOError),e:
+ print "!!! ","sym",obj
+ #immutable?
+ pass
+
+ #step 2: remove all the empty directories we can...
+
+ pos = 0
+ while pos<len(mydirs):
+ obj=mydirs[pos]
+ objld=listdir(obj)
+
+ if objld == None:
+ print "mydirs["+str(pos)+"]",mydirs[pos]
+ print "obj",obj
+ print "objld",objld
+ # the directory doesn't exist anymore, continue
+ pos += 1
+ continue
+
+ if len(objld)>0:
+ #we won't remove this directory (yet), continue
+ pos += 1
+ continue
+ else:
+ #zappo time
+ del mydirs[pos]
+ #we've made progress!
+ progress = 1
+ try:
+ os.rmdir(obj)
+ print "<<< ","dir",obj
+ except (OSError,IOError),e:
+ #immutable?
+ pass
+ #else:
+ # print "--- !empty","dir", obj
+ # continue
+
+ #step 3: if we've made progress, we'll give this another go...
+
+ #step 4: otherwise, we'll print out the remaining stuff that we didn't unmerge (and rightly so!)
+
+ #directories that aren't empty:
+ for x in mydirs:
+ print "--- !empty dir", x
+
+ #symlinks whose target still exists:
+ for x in mysyms:
+ print "--- !targe sym", x
+
+ #step 5: well, removal of package objects is complete, now for package *meta*-objects....
+
+ #remove self from vartree database so that our own virtual gets zapped if we're the last node
+ db[self.myroot]["vartree"].zap(self.mycpv)
+
+ # New code to remove stuff from the world and virtuals files when unmerged.
+ if trimworld:
+ worldlist=grabfile(self.myroot+WORLD_FILE)
+ mykey=cpv_getkey(self.mycpv)
+ newworldlist=[]
+ for x in worldlist:
+ if dep_getkey(x)==mykey:
+ matches=db[self.myroot]["vartree"].dbapi.match(x,use_cache=0)
+ if not matches:
+ #zap our world entry
+ pass
+ elif (len(matches)==1) and (matches[0]==self.mycpv):
+ #zap our world entry
+ pass
+ else:
+ #others are around; keep it.
+ newworldlist.append(x)
+ else:
+ #this doesn't match the package we're unmerging; keep it.
+ newworldlist.append(x)
+
+ # if the base dir doesn't exist, create it.
+ # (spanky noticed bug)
+ # XXX: dumb question, but abstracting the root uid might be wise/useful for
+ # 2nd pkg manager installation setups.
+ if not os.path.exists(os.path.dirname(self.myroot+WORLD_FILE)):
+ pdir = os.path.dirname(self.myroot + WORLD_FILE)
+ os.makedirs(pdir, mode=0755)
+ os.chown(pdir, 0, portage_gid)
+ os.chmod(pdir, 02770)
+
+ myworld=open(self.myroot+WORLD_FILE,"w")
+ for x in newworldlist:
+ myworld.write(x+"\n")
+ myworld.close()
+
+ #do original postrm
+ if myebuildpath and os.path.exists(myebuildpath):
+ # XXX: This should be the old config, not the current one.
+ # XXX: Use vardbapi to load up env vars.
+ a=doebuild(myebuildpath,"postrm",self.myroot,self.settings,use_cache=0,tree="vartree")
+ # XXX: Decide how to handle failures here.
+ if a != 0:
+ writemsg("!!! FAILED postrm: "+str(a)+"\n")
+ sys.exit(123)
+
+ self.unlockdb()
+
+ def isowner(self,filename,destroot):
+ """ check if filename is a new file or belongs to this package
+ (for this or a previous version)"""
+ destfile = os.path.normpath(destroot+"/"+filename)
+ if not os.path.exists(destfile):
+ return True
+ if self.getcontents() and filename in self.getcontents().keys():
+ return True
+
+ return False
+
+ def treewalk(self,srcroot,destroot,inforoot,myebuild,cleanup=0):
+ global db
+ # srcroot = ${D};
+ # destroot = where to merge, ie. ${ROOT},
+ # inforoot = root of db entry,
+ # secondhand = list of symlinks that have been skipped due to
+ # their target not existing (will merge later),
+
+ if not os.path.exists(self.dbcatdir):
+ os.makedirs(self.dbcatdir)
+
+ # This blocks until we can get the dirs to ourselves.
+ self.lockdb()
+
+ otherversions=[]
+ for v in db[self.myroot]["vartree"].dbapi.cp_list(self.mysplit[0]):
+ otherversions.append(v.split("/")[1])
+
+ # check for package collisions
+ if "collision-protect" in features:
+ myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
+
+ # the linkcheck only works if we are in srcroot
+ mycwd = os.getcwd()
+ os.chdir(srcroot)
+ mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
+
+ stopmerge=False
+ starttime=time.time()
+ i=0
+
+ otherpkg=[]
+ mypkglist=[]
+
+ if self.pkg in otherversions:
+ otherversions.remove(self.pkg) # we already checked this package
+
+ for v in otherversions:
+ # should we check for same SLOT here ?
+ mypkglist.append(dblink(self.cat,v,destroot,self.settings))
+
+ print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
+ for f in myfilelist:
+ nocheck = False
+ # listdir isn't intelligent enough to exclude symlinked dirs,
+ # so we have to do it ourselves
+ for s in mysymlinks:
+ # the length comparison makes sure that the symlink itself is checked
+ if f[:len(s)] == s and len(f) > len(s):
+ nocheck = True
+ if nocheck:
+ continue
+ i=i+1
+ if i % 1000 == 0:
+ print str(i)+" files checked ..."
+ if f[0] != "/":
+ f="/"+f
+ isowned = False
+ for ver in [self]+mypkglist:
+ if (ver.isowner(f, destroot) or ver.isprotected(f)):
+ isowned = True
+ break
+ if not isowned:
+ print "existing file "+f+" is not owned by this package"
+ stopmerge=True
+ print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
+ if stopmerge:
+ print red("*")+" This package is blocked because it wants to overwrite"
+ print red("*")+" files belonging to other packages (see messages above)."
+ print red("*")+" If you have no clue what this is all about report it "
+ print red("*")+" as a bug for this package on http://bugs.gentoo.org"
+ print
+ print red("package "+self.cat+"/"+self.pkg+" NOT merged")
+ print
+ # Why is the package already merged here db-wise? Shouldn't be the case
+ # only unmerge if it is a new package and has no contents
+ if not self.getcontents():
+ self.unmerge()
+ self.delete()
+ self.unlockdb()
+ sys.exit(1)
+ try:
+ os.chdir(mycwd)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+
+
+ # get old contents info for later unmerging
+ oldcontents = self.getcontents()
+
+ self.dbdir = self.dbtmpdir
+ self.delete()
+ if not os.path.exists(self.dbtmpdir):
+ os.makedirs(self.dbtmpdir)
+
+ print ">>> Merging",self.mycpv,"to",destroot
+
+ # run preinst script
+ if myebuild:
+ # if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
+ # (if any).
+ a=doebuild(myebuild,"preinst",root,self.settings,cleanup=cleanup,use_cache=0)
+ else:
+ a=doebuild(inforoot+"/"+self.pkg+".ebuild","preinst",root,self.settings,cleanup=cleanup,use_cache=0)
+
+ # XXX: Decide how to handle failures here.
+ if a != 0:
+ writemsg("!!! FAILED preinst: "+str(a)+"\n")
+ sys.exit(123)
+
+ # copy "info" files (like SLOT, CFLAGS, etc.) into the database
+ for x in listdir(inforoot):
+ self.copyfile(inforoot+"/"+x)
+
+ # get current counter value (counter_tick also takes care of incrementing it)
+ # XXX Need to make this destroot, but it needs to be initialized first. XXX
+ # XXX bis: leads to some invalidentry() call through cp_all().
+ counter = db["/"]["vartree"].dbapi.counter_tick(self.myroot,mycpv=self.mycpv)
+ # write local package counter for recording
+ lcfile = open(self.dbtmpdir+"/COUNTER","w")
+ lcfile.write(str(counter))
+ lcfile.close()
+
+ # open CONTENTS file (possibly overwriting old one) for recording
+ outfile=open(self.dbtmpdir+"/CONTENTS","w")
+
+ self.updateprotect()
+
+ #if we have a file containing previously-merged config file md5sums, grab it.
+ if os.path.exists(destroot+CONFIG_MEMORY_FILE):
+ cfgfiledict=grabdict(destroot+CONFIG_MEMORY_FILE)
+ else:
+ cfgfiledict={}
+ if self.settings.has_key("NOCONFMEM"):
+ cfgfiledict["IGNORE"]=1
+ else:
+ cfgfiledict["IGNORE"]=0
+
+ # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
+ mymtime = long(time.time())
+ prevmask = os.umask(0)
+ secondhand = []
+
+ # we do a first merge; this will recurse through all files in our srcroot but also build up a
+ # "second hand" of symlinks to merge later
+ if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
+ return 1
+
+ # now, it's time for dealing with our second hand; we'll loop until we can't merge anymore. The rest are
+ # broken symlinks. We'll merge them too.
+ lastlen=0
+ while len(secondhand) and len(secondhand)!=lastlen:
+ # clear the thirdhand. Anything from our second hand that
+ # couldn't get merged will be added to thirdhand.
+
+ thirdhand=[]
+ self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
+
+ #swap hands
+ lastlen=len(secondhand)
+
+ # our thirdhand now becomes our secondhand. It's ok to throw
+ # away secondhand since thirdhand contains all the stuff that
+ # couldn't be merged.
+ secondhand = thirdhand
+
+ if len(secondhand):
+ # force merge of remaining symlinks (broken or circular; oh well)
+ self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
+
+ #restore umask
+ os.umask(prevmask)
+
+ #if we opened it, close it
+ outfile.flush()
+ outfile.close()
+
+ if (oldcontents):
+ print ">>> Safely unmerging already-installed instance..."
+ self.dbdir = self.dbpkgdir
+ self.unmerge(oldcontents,trimworld=0)
+ self.dbdir = self.dbtmpdir
+ print ">>> original instance of package unmerged safely."
+
+ # We hold both directory locks.
+ self.dbdir = self.dbpkgdir
+ self.delete()
+ movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+
+ self.unlockdb()
+
+ #write out our collection of md5sums
+ if cfgfiledict.has_key("IGNORE"):
+ del cfgfiledict["IGNORE"]
+
+ # XXXX: HACK! PathSpec is very necessary here.
+ if not os.path.exists(destroot+PRIVATE_PATH):
+ os.makedirs(destroot+PRIVATE_PATH)
+ os.chown(destroot+PRIVATE_PATH,os.getuid(),portage_gid)
+ os.chmod(destroot+PRIVATE_PATH,02770)
+ dirlist = prefix_array(listdir(destroot+PRIVATE_PATH),destroot+PRIVATE_PATH+"/")
+ while dirlist:
+ dirlist.sort()
+ dirlist.reverse() # Gets them in file-before basedir order
+ x = dirlist.pop(0) # consume the entry so the loop terminates
+ if os.path.isdir(x):
+ dirlist += prefix_array(listdir(x),x+"/")
+ continue
+ os.unlink(x) # prefix_array already produced the full path
+
+ mylock = portage_locks.lockfile(destroot+CONFIG_MEMORY_FILE)
+ writedict(cfgfiledict,destroot+CONFIG_MEMORY_FILE)
+ portage_locks.unlockfile(mylock)
+
+ #do postinst script
+ if myebuild:
+ # if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
+ # (if any).
+ a=doebuild(myebuild,"postinst",root,self.settings,use_cache=0)
+ else:
+ a=doebuild(inforoot+"/"+self.pkg+".ebuild","postinst",root,self.settings,use_cache=0)
+
+ # XXX: Decide how to handle failures here.
+ if a != 0:
+ writemsg("!!! FAILED postinst: "+str(a)+"\n")
+ sys.exit(123)
+
+ downgrade = False
+ for v in otherversions:
+ if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
+ downgrade = True
+
+ #update environment settings, library paths. DO NOT change symlinks.
+ env_update(makelinks=(not downgrade))
+ #dircache may break autoclean because it remembers the -MERGING-pkg file
+ global dircache
+ if dircache.has_key(self.dbcatdir):
+ del dircache[self.dbcatdir]
+ print ">>>",self.mycpv,"merged."
+ return 0
+
+ def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
+ srcroot=os.path.normpath("///"+srcroot)+"/"
+ destroot=os.path.normpath("///"+destroot)+"/"
+ # this is supposed to merge a list of files. There will be 2 forms of argument passing.
+ if type(stufftomerge)==types.StringType:
+ #A directory is specified. Figure out protection paths, listdir() it and process it.
+ mergelist=listdir(srcroot+stufftomerge)
+ offset=stufftomerge
+ # We need mydest defined up here to calc. protection paths. This is now done once per
+ # directory rather than once per file merge. This should really help merge performance.
+ # Trailing / ensures that protects/masks with trailing /'s match.
+ mytruncpath="/"+offset+"/"
+ myppath=self.isprotected(mytruncpath)
+ else:
+ mergelist=stufftomerge
+ offset=""
+ for x in mergelist:
+ mysrc=os.path.normpath("///"+srcroot+offset+x)
+ mydest=os.path.normpath("///"+destroot+offset+x)
+ # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
+ myrealdest="/"+offset+x
+ # stat file once, test using S_* macros many times (faster that way)
+ try:
+ mystat=os.lstat(mysrc)
+ except SystemExit, e:
+ raise
+ except OSError, e:
+ writemsg("\n")
+ writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
+ writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
+ writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
+ writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
+ writemsg(red("!!! File: ")+str(mysrc)+"\n")
+ writemsg(red("!!! Error: ")+str(e)+"\n")
+ sys.exit(1)
+ except Exception, e:
+ writemsg("\n")
+ writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
+ writemsg(red("!!! A stat call returned the following error for the following file:"))
+ writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
+ writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
+ writemsg( "!!! File: "+str(mysrc)+"\n")
+ writemsg( "!!! Error: "+str(e)+"\n")
+ sys.exit(1)
+
+
+ mymode=mystat[stat.ST_MODE]
+ # handy variables; mydest is the target object on the live filesystems;
+ # mysrc is the source object in the temporary install dir
+ try:
+ mydmode=os.lstat(mydest)[stat.ST_MODE]
+ except SystemExit, e:
+ raise
+ except:
+ #dest file doesn't exist
+ mydmode=None
+
+ if stat.S_ISLNK(mymode):
+ # we are merging a symbolic link
+ myabsto=abssymlink(mysrc)
+ if myabsto[0:len(srcroot)]==srcroot:
+ myabsto=myabsto[len(srcroot):]
+ if myabsto[0]!="/":
+ myabsto="/"+myabsto
+ myto=os.readlink(mysrc)
+ if self.settings and self.settings["D"]:
+ if myto.find(self.settings["D"])==0:
+ myto=myto[len(self.settings["D"]):]
+ # myrealto contains the path of the real file to which this symlink points.
+ # we can simply test for existence of this file to see if the target has been merged yet
+ myrealto=os.path.normpath(os.path.join(destroot,myabsto))
+ if mydmode!=None:
+ #destination exists
+ if not stat.S_ISLNK(mydmode):
+ if stat.S_ISDIR(mydmode):
+ # directory in the way: we can't merge a symlink over a directory
+ # we won't merge this, continue with next file...
+ continue
+ srctarget = os.path.normpath(os.path.dirname(mysrc)+"/"+myto)
+ if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
+ # Kill file blocking installation of symlink to dir #71787
+ pass
+ elif self.isprotected(mydest):
+ # Use md5 of the target in ${D} if it exists...
+ if os.path.exists(os.path.normpath(srcroot+myabsto)):
+ mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(srcroot+myabsto))
+ else:
+ mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(myabsto))
+
+ # if secondhand==None it means we're operating in "force" mode and should not create a second hand.
+ if (secondhand!=None) and (not os.path.exists(myrealto)):
+ # either the target directory doesn't exist yet or the target file doesn't exist -- or
+ # the target is a broken symlink. We will add this file to our "second hand" and merge
+ # it later.
+ secondhand.append(mysrc[len(srcroot):])
+ continue
+ # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
+ mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
+ if mymtime!=None:
+ print ">>>",mydest,"->",myto
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+ else:
+ print "!!! Failed to move file."
+ print "!!!",mydest,"->",myto
+ sys.exit(1)
+ elif stat.S_ISDIR(mymode):
+ # we are merging a directory
+ if mydmode!=None:
+ # destination exists
+
+ if bsd_chflags:
+ # Save then clear flags on dest.
+ dflags=bsd_chflags.lgetflags(mydest)
+ if(bsd_chflags.lchflags(mydest, 0)<0):
+ writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n")
+
+ if not os.access(mydest, os.W_OK):
+ pkgstuff = pkgsplit(self.pkg)
+ writemsg("\n!!! Cannot write to '"+mydest+"'.\n")
+ writemsg("!!! Please check permissions and directories for broken symlinks.\n")
+ writemsg("!!! You may start the merge process again by using ebuild:\n")
+ writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
+ writemsg("!!! And finish by running this: env-update\n\n")
+ return 1
+
+ if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
+ # a symlink to an existing directory will work for us; keep it:
+ print "---",mydest+"/"
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ else:
+ # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
+ if movefile(mydest,mydest+".backup", mysettings=self.settings) == None:
+ sys.exit(1)
+ print "bak",mydest,mydest+".backup"
+ #now create our directory
+ if selinux_enabled:
+ sid = selinux.get_sid(mysrc)
+ selinux.secure_mkdir(mydest,sid)
+ else:
+ os.mkdir(mydest)
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ os.chmod(mydest,mystat[0])
+ lchown(mydest,mystat[4],mystat[5])
+ print ">>>",mydest+"/"
+ else:
+ #destination doesn't exist
+ if selinux_enabled:
+ sid = selinux.get_sid(mysrc)
+ selinux.secure_mkdir(mydest,sid)
+ else:
+ os.mkdir(mydest)
+ os.chmod(mydest,mystat[0])
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, bsd_chflags.lgetflags(mysrc))
+ lchown(mydest,mystat[4],mystat[5])
+ print ">>>",mydest+"/"
+ outfile.write("dir "+myrealdest+"\n")
+ # recurse and merge this directory
+ if self.mergeme(srcroot,destroot,outfile,secondhand,offset+x+"/",cfgfiledict,thismtime):
+ return 1
+ elif stat.S_ISREG(mymode):
+ # we are merging a regular file
+ mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
+ # calculate config file protection stuff
+ mydestdir=os.path.dirname(mydest)
+ moveme=1
+ zing="!!!"
+ if mydmode!=None:
+ # destination file exists
+ if stat.S_ISDIR(mydmode):
+ # install of destination is blocked by an existing directory with the same name
+ moveme=0
+ print "!!!",mydest
+ elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
+ cfgprot=0
+ # install of destination is blocked by an existing regular file,
+ # or by a symlink to an existing regular file;
+ # now, config file management may come into play.
+ # we only need to tweak mydest if cfg file management is in play.
+ if myppath:
+ # we have a protection path; enable config file management.
+ destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
+ cycled=0
+ if cfgfiledict.has_key(myrealdest):
+ if destmd5 in cfgfiledict[myrealdest]:
+ #cycle
+ print "cycle"
+ del cfgfiledict[myrealdest]
+ cycled=1
+ if mymd5==destmd5:
+ #file already in place; simply update mtimes of destination
+ os.utime(mydest,(thismtime,thismtime))
+ zing="---"
+ moveme=0
+ elif cycled:
+ #mymd5!=destmd5 and we've cycled; move mysrc into place as a ._cfg file
+ moveme=1
+ cfgfiledict[myrealdest]=[mymd5]
+ cfgprot=1
+ elif cfgfiledict.has_key(myrealdest) and (mymd5 in cfgfiledict[myrealdest]):
+ #mymd5!=destmd5, we haven't cycled, and the file we're merging has already been merged previously
+ zing="-o-"
+ moveme=cfgfiledict["IGNORE"]
+ cfgprot=cfgfiledict["IGNORE"]
+ else:
+ #mymd5!=destmd5, we haven't cycled, and the file we're merging hasn't been merged before
+ moveme=1
+ cfgprot=1
+ if not cfgfiledict.has_key(myrealdest):
+ cfgfiledict[myrealdest]=[]
+ if mymd5 not in cfgfiledict[myrealdest]:
+ cfgfiledict[myrealdest].append(mymd5)
+ # only record the last md5
+ if len(cfgfiledict[myrealdest])>1:
+ del cfgfiledict[myrealdest][0]
+
+ if cfgprot:
+ mydest = new_protect_filename(myrealdest, newmd5=mymd5)
+
+ # whether config protection or not, we merge the new file the
+ # same way. Unless moveme=0 (blocking directory)
+ if moveme:
+ mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
+ if mymtime == None:
+ sys.exit(1)
+ zing=">>>"
+ else:
+ mymtime=thismtime
+ # We need to touch the destination so that on --update the
+ # old package won't yank the file with it. (non-cfgprot related)
+ os.utime(myrealdest,(thismtime,thismtime))
+ zing="---"
+ if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
+
+ # XXX kludge, bug #58848; can be killed when portage stops relying on
+ # md5+mtime, and uses refcounts
+ # alright, we've fooled w/ mtime on the file; this pisses off static archives
+ # basically internal mtime != file's mtime, so the linker (falsely) thinks
+ # the archive is stale, and needs to have its TOC rebuilt.
+
+ myf=open(myrealdest,"r+")
+
+ # ar mtime field is digits padded with spaces, 12 bytes.
+ lms=str(thismtime+5).ljust(12)
+ myf.seek(0)
+ magic=myf.read(8)
+ if magic != "!<arch>\n":
+ # not an archive (dolib.a from portage.py makes it here fex)
+ myf.close()
+ else:
+ st=os.stat(myrealdest)
+ while myf.tell() < st.st_size - 12:
+ # skip object name
+ myf.seek(16,1)
+
+ # update mtime
+ myf.write(lms)
+
+ # skip uid/gid/mperm
+ myf.seek(20,1)
+
+ # read the archive member's size
+ x=long(myf.read(10))
+
+ # skip the trailing newlines, and add the potential
+ # extra padding byte if it's not an even size
+ myf.seek(x + 2 + (x % 2),1)
+
+ # and now we're at the end. yay.
+ myf.close()
+ mymd5=portage_checksum.perform_md5(myrealdest,calc_prelink=1)
+ os.utime(myrealdest,(thismtime,thismtime))
+
+ if mymtime!=None:
+ zing=">>>"
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+ print zing,mydest
+ else:
+ # we are merging a fifo or device node
+ zing="!!!"
+ if mydmode==None:
+ # destination doesn't exist
+ if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
+ zing=">>>"
+ if stat.S_ISFIFO(mymode):
+ # we don't record device nodes in CONTENTS,
+ # although we do merge them.
+ outfile.write("fif "+myrealdest+"\n")
+ else:
+ sys.exit(1)
+ print zing+" "+mydest
+
+ def merge(self,mergeroot,inforoot,myroot,myebuild=None,cleanup=0):
+ return self.treewalk(mergeroot,myroot,inforoot,myebuild,cleanup=cleanup)
+
+ def getstring(self,name):
+ "returns contents of a file with whitespace converted to spaces"
+ if not os.path.exists(self.dbdir+"/"+name):
+ return ""
+ myfile=open(self.dbdir+"/"+name,"r")
+ mydata=string.split(myfile.read())
+ myfile.close()
+ return string.join(mydata," ")
+
+ def copyfile(self,fname):
+ shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
+
+ def getfile(self,fname):
+ if not os.path.exists(self.dbdir+"/"+fname):
+ return ""
+ myfile=open(self.dbdir+"/"+fname,"r")
+ mydata=myfile.read()
+ myfile.close()
+ return mydata
+
+ def setfile(self,fname,data):
+ myfile=open(self.dbdir+"/"+fname,"w")
+ myfile.write(data)
+ myfile.close()
+
+ def getelements(self,ename):
+ if not os.path.exists(self.dbdir+"/"+ename):
+ return []
+ myelement=open(self.dbdir+"/"+ename,"r")
+ mylines=myelement.readlines()
+ myreturn=[]
+ for x in mylines:
+ for y in string.split(x[:-1]):
+ myreturn.append(y)
+ myelement.close()
+ return myreturn
+
+ def setelements(self,mylist,ename):
+ myelement=open(self.dbdir+"/"+ename,"w")
+ for x in mylist:
+ myelement.write(x+"\n")
+ myelement.close()
+
+ def isregular(self):
+ "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
+ return os.path.exists(self.dbdir+"/CATEGORY")
+
+def cleanup_pkgmerge(mypkg,origdir):
+ shutil.rmtree(settings["PORTAGE_TMPDIR"]+"/portage-pkg/"+mypkg)
+ if os.path.exists(settings["PORTAGE_TMPDIR"]+"/portage/"+mypkg+"/temp/environment"):
+ os.unlink(settings["PORTAGE_TMPDIR"]+"/portage/"+mypkg+"/temp/environment")
+ os.chdir(origdir)
+
+def pkgmerge(mytbz2,myroot,mysettings):
+ """will merge a .tbz2 file, returning a list of runtime dependencies
+ that must be satisfied, or None if there was a merge error. This
+ code assumes the package exists."""
+ if mytbz2[-5:]!=".tbz2":
+ print "!!! Not a .tbz2 file"
+ return None
+ mypkg=os.path.basename(mytbz2)[:-5]
+ xptbz2=xpak.tbz2(mytbz2)
+ pkginfo={}
+ mycat=xptbz2.getfile("CATEGORY")
+ if not mycat:
+ print "!!! CATEGORY info missing from info chunk, aborting..."
+ return None
+ mycat=mycat.strip()
+ mycatpkg=mycat+"/"+mypkg
+ tmploc=mysettings["PORTAGE_TMPDIR"]+"/portage-pkg/"
+ pkgloc=tmploc+"/"+mypkg+"/bin/"
+ infloc=tmploc+"/"+mypkg+"/inf/"
+ myebuild=tmploc+"/"+mypkg+"/inf/"+os.path.basename(mytbz2)[:-4]+"ebuild"
+ if os.path.exists(tmploc+"/"+mypkg):
+ shutil.rmtree(tmploc+"/"+mypkg,1)
+ os.makedirs(pkgloc)
+ os.makedirs(infloc)
+ print ">>> extracting info"
+ xptbz2.unpackinfo(infloc)
+ # run pkg_setup early, so we can bail out early
+ # (before extracting binaries) if there's a problem
+ origdir=getcwd()
+ os.chdir(pkgloc)
+
+ mysettings.configdict["pkg"]["CATEGORY"] = mycat;
+ a=doebuild(myebuild,"setup",myroot,mysettings,tree="bintree")
+ print ">>> extracting",mypkg
+ notok=spawn("bzip2 -dqc -- '"+mytbz2+"' | tar xpf -",mysettings,free=1)
+ if notok:
+ print "!!! Error extracting",mytbz2
+ cleanup_pkgmerge(mypkg,origdir)
+ return None
+
+ # the merge takes care of pre/postinst and old instance
+ # auto-unmerge, virtual/provides updates, etc.
+ mysettings.load_infodir(infloc)
+ mylink=dblink(mycat,mypkg,myroot,mysettings)
+ mylink.merge(pkgloc,infloc,myroot,myebuild,cleanup=1)
+
+ if not os.path.exists(infloc+"/RDEPEND"):
+ returnme=""
+ else:
+ #get runtime dependencies
+ a=open(infloc+"/RDEPEND","r")
+ returnme=string.join(string.split(a.read())," ")
+ a.close()
+ cleanup_pkgmerge(mypkg,origdir)
+ return returnme
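+# Illustrative use (hypothetical path):
+#   rdeps = pkgmerge("/usr/portage/packages/All/foo-1.0.tbz2", "/", settings)
+# returns the package's RDEPEND string on success, or None on a merge error.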
+
+
+if os.environ.has_key("ROOT"):
+ root=os.environ["ROOT"]
+ if not len(root):
+ root="/"
+ elif root[-1]!="/":
+ root=root+"/"
+else:
+ root="/"
+if root != "/":
+ if not os.path.exists(root[:-1]):
+ writemsg("!!! Error: ROOT "+root+" does not exist. Please correct this.\n")
+ writemsg("!!! Exiting.\n\n")
+ sys.exit(1)
+ elif not os.path.isdir(root[:-1]):
+ writemsg("!!! Error: ROOT "+root[:-1]+" is not a directory. Please correct this.\n")
+ writemsg("!!! Exiting.\n\n")
+ sys.exit(1)
+
+#create tmp and var/tmp if they don't exist; read config
+os.umask(0)
+if not os.path.exists(root+"tmp"):
+ writemsg(">>> "+root+"tmp doesn't exist, creating it...\n")
+ os.mkdir(root+"tmp",01777)
+if not os.path.exists(root+"var/tmp"):
+ writemsg(">>> "+root+"var/tmp doesn't exist, creating it...\n")
+ try:
+ os.mkdir(root+"var",0755)
+ except (OSError,IOError):
+ pass
+ try:
+ os.mkdir(root+"var/tmp",01777)
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("portage: couldn't create /var/tmp; exiting.\n")
+ sys.exit(1)
+if not os.path.exists(root+"var/lib/portage"):
+ writemsg(">>> "+root+"var/lib/portage doesn't exist, creating it...\n")
+ try:
+ os.mkdir(root+"var",0755)
+ except (OSError,IOError):
+ pass
+ try:
+ os.mkdir(root+"var/lib",0755)
+ except (OSError,IOError):
+ pass
+ try:
+ os.mkdir(root+"var/lib/portage",02750)
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("portage: couldn't create /var/lib/portage; exiting.\n")
+ sys.exit(1)
+
+
+#####################################
+# Deprecation Checks
+
+os.umask(022)
+profiledir=None
+if "PORTAGE_CALLER" in os.environ and os.environ["PORTAGE_CALLER"] == "emerge" and os.path.isdir(PROFILE_PATH):
+ profiledir = PROFILE_PATH
+ if os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
+ deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
+ dcontent = deprecatedfile.readlines()
+ deprecatedfile.close()
+ newprofile = dcontent[0]
+ writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"))
+ writemsg(red("!!! Please upgrade to the following profile if possible:\n"))
+ writemsg(8*" "+green(newprofile)+"\n")
+ if len(dcontent) > 1:
+ writemsg("To upgrade do the following steps:\n")
+ for myline in dcontent[1:]:
+ writemsg(myline)
+ writemsg("\n\n")
+
+if os.path.exists(USER_VIRTUALS_FILE):
+ writemsg(red("\n!!! /etc/portage/virtuals is deprecated in favor of\n"))
+ writemsg(red("!!! /etc/portage/profile/virtuals. Please move it to\n"))
+ writemsg(red("!!! this new location.\n\n"))
+
+#
+#####################################
+
+db={}
+
+# =============================================================================
+# =============================================================================
+# -----------------------------------------------------------------------------
+# We're going to lock the global config to prevent changes, but we need
+# to ensure the global settings are right.
+settings=config(config_profile_path=PROFILE_PATH,config_incrementals=portage_const.INCREMENTALS)
+
+# useful info
+settings["PORTAGE_MASTER_PID"]=str(os.getpid())
+settings.backup_changes("PORTAGE_MASTER_PID")
+# We are disabling user-specific bashrc files.
+settings["BASH_ENV"] = INVALID_ENV_FILE
+settings.backup_changes("BASH_ENV")
+
+# gets virtual package settings
+def getvirtuals(myroot):
+ global settings
+ writemsg("--- DEPRECATED call to getvirtual\n")
+ return settings.getvirtuals(myroot)
+
+def do_vartree(mysettings):
+ global virts,virts_p
+ virts=mysettings.getvirtuals("/")
+ virts_p={}
+
+ if virts:
+ myvkeys=virts.keys()
+ for x in myvkeys:
+ vkeysplit=x.split("/")
+ if not virts_p.has_key(vkeysplit[1]):
+ virts_p[vkeysplit[1]]=virts[x]
+ db["/"]={"virtuals":virts,"vartree":vartree("/",virts)}
+ if root!="/":
+ virts=mysettings.getvirtuals(root)
+ db[root]={"virtuals":virts,"vartree":vartree(root,virts)}
+ #We need to create the vartree first, then load our settings, and then set up our other trees
+
+usedefaults=settings.use_defs
+
+# XXX: This is a circular fix.
+#do_vartree(settings)
+#settings.loadVirtuals('/')
+do_vartree(settings)
+#settings.loadVirtuals('/')
+
+settings.reset() # XXX: Regenerate use after we get a vartree -- GLOBAL
+
+
+# XXX: Might cause problems with root="/" assumptions
+portdb=portdbapi(settings["PORTDIR"])
+
+settings.lock()
+# -----------------------------------------------------------------------------
+# =============================================================================
+# =============================================================================
+
+
+if 'selinux' in settings["USE"].split(" "):
+ try:
+ import selinux
+ selinux_enabled=1
+ except OSError, e:
+ writemsg(red("!!! SELinux not loaded: ")+str(e)+"\n")
+ selinux_enabled=0
+ except ImportError:
+ writemsg(red("!!! SELinux module not found.")+" Please verify that it was installed.\n")
+ selinux_enabled=0
+else:
+ selinux_enabled=0
+
+cachedirs=[CACHE_PATH]
+if root!="/":
+ cachedirs.append(root+CACHE_PATH)
+if not os.environ.has_key("SANDBOX_ACTIVE"):
+ for cachedir in cachedirs:
+ if not os.path.exists(cachedir):
+ os.makedirs(cachedir,0755)
+ writemsg(">>> "+cachedir+" doesn't exist, creating it...\n")
+ if not os.path.exists(cachedir+"/dep"):
+ os.makedirs(cachedir+"/dep",2755)
+ writemsg(">>> "+cachedir+"/dep doesn't exist, creating it...\n")
+ try:
+ os.chown(cachedir,uid,portage_gid)
+ os.chmod(cachedir,0775)
+ except OSError:
+ pass
+ try:
+ mystat=os.lstat(cachedir+"/dep")
+ os.chown(cachedir+"/dep",uid,portage_gid)
+ os.chmod(cachedir+"/dep",02775)
+ if mystat[stat.ST_GID]!=portage_gid:
+ spawn("chown -R "+str(uid)+":"+str(portage_gid)+" "+cachedir+"/dep",settings,free=1)
+ spawn("chmod -R u+rw,g+rw "+cachedir+"/dep",settings,free=1)
+ except OSError:
+ pass
+
+def flushmtimedb(record):
+ if mtimedb:
+ if record in mtimedb.keys():
+ del mtimedb[record]
+ #print "mtimedb["+record+"] is cleared."
+ else:
+ writemsg("Invalid or unset record '"+record+"' in mtimedb.\n")
+
+#grab mtimes for eclasses and upgrades
+mtimedb={}
+mtimedbkeys=[
+"updates", "info",
+"version", "starttime",
+"resume", "ldpath"
+]
+mtimedbfile=root+"var/cache/edb/mtimedb"
+try:
+ mypickle=cPickle.Unpickler(open(mtimedbfile))
+ mypickle.find_global=None
+ mtimedb=mypickle.load()
+ if mtimedb.has_key("old"):
+ mtimedb["updates"]=mtimedb["old"]
+ del mtimedb["old"]
+ if mtimedb.has_key("cur"):
+ del mtimedb["cur"]
+except SystemExit, e:
+ raise
+except:
+ #print "!!!",e
+ mtimedb={"updates":{},"version":"","starttime":0}
+
+for x in mtimedb.keys():
+ if x not in mtimedbkeys:
+ writemsg("Deleting invalid mtimedb key: "+str(x)+"\n")
+ del mtimedb[x]
+
+#,"porttree":portagetree(root,virts),"bintree":binarytree(root,virts)}
+features=settings["FEATURES"].split()
+
+do_upgrade_packagesmessage=0
+def do_upgrade(mykey):
+ global do_upgrade_packagesmessage
+ writemsg("\n\n")
+ writemsg(green("Performing Global Updates: ")+bold(mykey)+"\n")
+ writemsg("(Could take a couple of minutes if you have a lot of binary packages.)\n")
+ writemsg(" "+bold(".")+"='update pass' "+bold("*")+"='binary update' "+bold("@")+"='/var/db move'\n"+" "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
+ processed=1
+ #remove stale virtual entries (mappings for packages that no longer exist)
+
+ update_files={}
+ file_contents={}
+ myxfiles = ["package.mask","package.unmask","package.keywords","package.use"]
+ myxfiles = myxfiles + prefix_array(myxfiles, "profile/")
+ for x in myxfiles:
+ try:
+ myfile = open("/etc/portage/"+x,"r")
+ file_contents[x] = myfile.readlines()
+ myfile.close()
+ except IOError:
+ if file_contents.has_key(x):
+ del file_contents[x]
+ continue
+
+ worldlist=grabfile("/"+WORLD_FILE)
+ myupd=grabfile(mykey)
+ db["/"]["bintree"]=binarytree("/",settings["PKGDIR"],virts)
+ for myline in myupd:
+ mysplit=myline.split()
+ if not len(mysplit):
+ continue
+ if mysplit[0]!="move" and mysplit[0]!="slotmove":
+ writemsg("portage: Update type \""+mysplit[0]+"\" not recognized.\n")
+ processed=0
+ continue
+ if mysplit[0]=="move" and len(mysplit)!=3:
+ writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
+ processed=0
+ continue
+ if mysplit[0]=="slotmove" and len(mysplit)!=4:
+ writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
+ processed=0
+ continue
+ sys.stdout.write(".")
+ sys.stdout.flush()
+
+ if mysplit[0]=="move":
+ db["/"]["vartree"].dbapi.move_ent(mysplit)
+ db["/"]["bintree"].move_ent(mysplit)
+   #update world entries, if any:
+   for x in range(0,len(worldlist)):
+    worldlist[x]=dep_transform(worldlist[x],mysplit[1],mysplit[2])
+
+   #update /etc/portage/package.*
+ for x in file_contents:
+ for mypos in range(0,len(file_contents[x])):
+ line=file_contents[x][mypos]
+ if line[0]=="#" or string.strip(line)=="":
+ continue
+ key=dep_getkey(line.split()[0])
+ if key==mysplit[1]:
+ file_contents[x][mypos]=string.replace(line,mysplit[1],mysplit[2])
+ update_files[x]=1
+ sys.stdout.write("p")
+ sys.stdout.flush()
+
+ elif mysplit[0]=="slotmove":
+ db["/"]["vartree"].dbapi.move_slot_ent(mysplit)
+ db["/"]["bintree"].move_slot_ent(mysplit,settings["PORTAGE_TMPDIR"]+"/tbz2")
+
+ for x in update_files:
+ mydblink = dblink('','','/',settings)
+ if mydblink.isprotected("/etc/portage/"+x):
+ updating_file=new_protect_filename("/etc/portage/"+x)[0]
+ else:
+ updating_file="/etc/portage/"+x
+ try:
+ myfile=open(updating_file,"w")
+ myfile.writelines(file_contents[x])
+ myfile.close()
+ except IOError:
+ continue
+
+ # We gotta do the brute force updates for these now.
+ if (settings["PORTAGE_CALLER"] in ["fixpackages"]) or \
+ ("fixpackages" in features):
+ db["/"]["bintree"].update_ents(myupd,settings["PORTAGE_TMPDIR"]+"/tbz2")
+ else:
+ do_upgrade_packagesmessage = 1
+
+ if processed:
+ #update our internal mtime since we processed all our directives.
+ mtimedb["updates"][mykey]=os.stat(mykey)[stat.ST_MTIME]
+ myworld=open("/"+WORLD_FILE,"w")
+ for x in worldlist:
+ myworld.write(x+"\n")
+ myworld.close()
+ print ""
+
+def portageexit():
+ global uid,portage_gid,portdb,db
+ if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
+  # wait for child processes to die
+ try:
+ while True:
+ os.wait()
+ except OSError:
+ #writemsg(">>> All child process are now dead.")
+ pass
+
+ close_portdbapi_caches()
+
+ if mtimedb:
+ # Store mtimedb
+ mymfn=mtimedbfile
+ try:
+ mtimedb["version"]=VERSION
+ cPickle.dump(mtimedb, open(mymfn,"w"), -1)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+
+ try:
+ os.chown(mymfn,uid,portage_gid)
+ os.chmod(mymfn,0664)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+
+atexit.register(portageexit)
+
+if (secpass==2) and (not os.environ.has_key("SANDBOX_ACTIVE")):
+ if settings["PORTAGE_CALLER"] in ["emerge","fixpackages"]:
+ #only do this if we're root and not running repoman/ebuild digest
+ updpath=os.path.normpath(settings["PORTDIR"]+"///profiles/updates")
+ didupdate=0
+ if not mtimedb.has_key("updates"):
+ mtimedb["updates"]={}
+ try:
+ mylist=listdir(updpath,EmptyOnError=1)
+ # resort the list
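+   # update files are named like "1Q-2005"; rewriting them to "2005-1Q"
+   # lets a plain sort order them by year and then quarter, after which
+   # they are rewritten back to their original names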
+ mylist=[myfile[3:]+"-"+myfile[:2] for myfile in mylist]
+ mylist.sort()
+ mylist=[myfile[5:]+"-"+myfile[:4] for myfile in mylist]
+ for myfile in mylist:
+ mykey=updpath+"/"+myfile
+ if not os.path.isfile(mykey):
+ continue
+ if (not mtimedb["updates"].has_key(mykey)) or \
+ (mtimedb["updates"][mykey] != os.stat(mykey)[stat.ST_MTIME]) or \
+ (settings["PORTAGE_CALLER"] == "fixpackages"):
+ didupdate=1
+ do_upgrade(mykey)
+ portageexit() # This lets us save state for C-c.
+ except OSError:
+ #directory doesn't exist
+ pass
+ if didupdate:
+ #make sure our internal databases are consistent; recreate our virts and vartree
+ do_vartree(settings)
+ if do_upgrade_packagesmessage and \
+ listdir(settings["PKGDIR"]+"/All/",EmptyOnError=1):
+ writemsg("\n\n\n ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
+ writemsg("\n tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
+ writemsg("\n")
+
+
+
+
+
+#continue setting up other trees
+db["/"]["porttree"]=portagetree("/",virts)
+db["/"]["bintree"]=binarytree("/",settings["PKGDIR"],virts)
+if root!="/":
+ db[root]["porttree"]=portagetree(root,virts)
+ db[root]["bintree"]=binarytree(root,settings["PKGDIR"],virts)
+
+profileroots = [settings["PORTDIR"]+"/profiles/"]
+for x in settings["PORTDIR_OVERLAY"].split():
+ profileroots.insert(0, x+"/profiles/")
+thirdparty_lists = grab_multiple("thirdpartymirrors", profileroots, grabdict)
+thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
+
+if not os.path.exists(settings["PORTAGE_TMPDIR"]):
+ writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
+ writemsg("does not exist. Please create this directory or correct your PORTAGE_TMPDIR setting.\n")
+ sys.exit(1)
+if not os.path.isdir(settings["PORTAGE_TMPDIR"]):
+ writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
+ writemsg("is not a directory. Please correct your PORTAGE_TMPDIR setting.\n")
+ sys.exit(1)
+
+# COMPATIBILITY -- This shouldn't be used.
+pkglines = settings.packages
+
+groups=settings["ACCEPT_KEYWORDS"].split()
+archlist=[]
+for myarch in grabfile(settings["PORTDIR"]+"/profiles/arch.list"):
+ archlist += [myarch,"~"+myarch]
+for group in groups:
+ if not archlist:
+ writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
+ break
+ elif (group not in archlist) and group[0]!='-':
+ writemsg("\n"+red("!!! INVALID ACCEPT_KEYWORDS: ")+str(group)+"\n")
+
+# Clear the cache
+dircache={}
+
+if not os.path.islink(PROFILE_PATH) and os.path.exists(settings["PORTDIR"]+"/profiles"):
+ writemsg(red("\a\n\n!!! "+PROFILE_PATH+" is not a symlink and will probably prevent most merges.\n"))
+ writemsg(red("!!! It should point into a profile within %s/profiles/\n" % settings["PORTDIR"]))
+ writemsg(red("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
+ time.sleep(3)
+
+# ============================================================================
+# ============================================================================
+
diff --git a/pym/portage.py.orig b/pym/portage.py.orig
new file mode 100644
index 000000000..5fae17eaf
--- /dev/null
+++ b/pym/portage.py.orig
@@ -0,0 +1,7427 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage.py,v 1.524.2.59 2005/04/23 07:26:04 jstubbs Exp $
+cvs_id_string="$Id: portage.py,v 1.524.2.59 2005/04/23 07:26:04 jstubbs Exp $"[5:-2]
+
+VERSION="$Revision: 1.524.2.59 $"[11:-2] + "-cvs"
+
+# ===========================================================================
+# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
+# ===========================================================================
+
+try:
+ import sys
+except SystemExit, e:
+ raise
+except:
+ print "Failed to import sys! Something is _VERY_ wrong with python."
+ raise SystemExit, 127
+
+try:
+ import os,string,types,atexit,signal,fcntl
+ import time,cPickle,traceback,copy
+ import re,pwd,grp,commands
+ import shlex,shutil
+
+ import stat
+ from time import sleep
+ from random import shuffle
+except SystemExit, e:
+ raise
+except Exception, e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
+ sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
+ sys.stderr.write("!!! itself and thus portage is no able to continue processing.\n\n")
+
+ sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
+ sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
+
+ sys.stderr.write(" "+str(e)+"\n\n");
+ sys.exit(127)
+except:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
+ sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
+ sys.stderr.write("!!! itself and thus portage is no able to continue processing.\n\n")
+
+ sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
+ sys.stderr.write("!!! gone wrong. The exception was non-standard and we were unable to catch it.\n\n")
+ sys.exit(127)
+
+try:
+ # XXX: This should get renamed to bsd_chflags, I think.
+ import chflags
+ bsd_chflags = chflags
+except SystemExit, e:
+ raise
+except:
+ # XXX: This should get renamed to bsd_chflags, I think.
+ bsd_chflags = None
+
+try:
+ import cvstree
+ import xpak
+ import getbinpkg
+ import portage_dep
+
+ # XXX: This needs to get cleaned up.
+ import output
+ from output import blue, bold, brown, darkblue, darkgreen, darkred, darkteal, \
+ darkyellow, fuchsia, fuscia, green, purple, red, teal, turquoise, white, \
+ xtermTitle, xtermTitleReset, yellow
+
+ import portage_const
+ from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
+ USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
+ PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
+ EBUILD_SH_BINARY, SANDBOX_BINARY, DEPSCAN_SH_BINARY, BASH_BINARY, \
+ MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
+ DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
+ INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, SANDBOX_PIDS_FILE, CONFIG_MEMORY_FILE,\
+ INCREMENTALS, STICKIES
+
+ from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
+ portage_uid, portage_gid
+
+ import portage_util
+ from portage_util import grab_multiple, grabdict, grabdict_package, grabfile, grabfile_package, \
+ grabints, map_dictlist_vals, pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
+ unique_array, varexpand, writedict, writeints, writemsg, getconfig
+ import portage_exception
+ import portage_gpg
+ import portage_locks
+ import portage_exec
+ from portage_locks import unlockfile,unlockdir,lockfile,lockdir
+ import portage_checksum
+ from portage_checksum import perform_md5,perform_checksum,prelink_capable
+ from portage_localization import _
+except SystemExit, e:
+ raise
+except Exception, e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
+ sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
+ sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
+ sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
+ sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
+ sys.stderr.write("!!! a recovery of portage.\n")
+
+ sys.stderr.write(" "+str(e)+"\n\n")
+ sys.exit(127)
+
+
+# ===========================================================================
+# END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
+# ===========================================================================
+
+
+def exithandler(signum,frame):
+ """Handles ^C interrupts in a sane manner"""
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+ # 0=send to *everybody* in process group
+ portageexit()
+ print "Exiting due to signal"
+ os.kill(0,signum)
+ sys.exit(1)
+
+signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+signal.signal(signal.SIGINT, exithandler)
+signal.signal(signal.SIGTERM, exithandler)
+signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+def load_mod(name):
+ modname = string.join(string.split(name,".")[:-1],".")
+ mod = __import__(modname)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
+ for x in key_order:
+ if top_dict.has_key(x) and top_dict[x].has_key(key):
+ if FullCopy:
+ return copy.deepcopy(top_dict[x][key])
+ else:
+ return top_dict[x][key]
+ if EmptyOnError:
+ return ""
+ else:
+ raise KeyError, "Key not found in list; '%s'" % key
+
+def getcwd():
+ "this fixes situations where the current directory doesn't exist"
+ try:
+ return os.getcwd()
+ except SystemExit, e:
+ raise
+ except:
+ os.chdir("/")
+ return "/"
+getcwd()
+
+def abssymlink(symlink):
+ "This reads symlinks, resolving the relative symlinks, and returning the absolute."
+ mylink=os.readlink(symlink)
+ if mylink[0] != '/':
+ mydir=os.path.dirname(symlink)
+ mylink=mydir+"/"+mylink
+ return os.path.normpath(mylink)
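+# Illustrative sketch (not in the original source): for a link
+# /usr/lib/foo -> ../share/foo, abssymlink("/usr/lib/foo") joins the
+# relative target with the link's directory and normalizes the result,
+# returning "/usr/share/foo".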
+
+def suffix_array(array,suffix,doblanks=1):
+ """Appends a given suffix to each element in an Array/List/Tuple.
+ Returns a List."""
+ if type(array) not in [types.ListType, types.TupleType]:
+ raise TypeError, "List or Tuple expected. Got %s" % type(array)
+ newarray=[]
+ for x in array:
+ if x or doblanks:
+ newarray.append(x + suffix)
+ else:
+ newarray.append(x)
+ return newarray
+
+def prefix_array(array,prefix,doblanks=1):
+ """Prepends a given prefix to each element in an Array/List/Tuple.
+ Returns a List."""
+ if type(array) not in [types.ListType, types.TupleType]:
+ raise TypeError, "List or Tuple expected. Got %s" % type(array)
+ newarray=[]
+ for x in array:
+ if x or doblanks:
+ newarray.append(prefix + x)
+ else:
+ newarray.append(x)
+ return newarray
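+# Illustrative sketch (not in the original source):
+#   suffix_array(["a","b"], ".ebuild") ==> ["a.ebuild", "b.ebuild"]
+#   prefix_array(["package.mask","package.use"], "profile/")
+#     ==> ["profile/package.mask", "profile/package.use"]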
+
+def normalize_path(mypath):
+ newpath = os.path.normpath(mypath)
+ if len(newpath) > 1:
+ if newpath[:2] == "//":
+ newpath = newpath[1:]
+ return newpath
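+# Illustrative sketch (not in the original source): os.path.normpath()
+# preserves a leading "//" on POSIX, so the extra check collapses it:
+#   normalize_path("//usr//lib/") ==> "/usr/lib"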
+
+dircache = {}
+cacheHit=0
+cacheMiss=0
+cacheStale=0
+def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
+ global cacheHit,cacheMiss,cacheStale
+ mypath = normalize_path(my_original_path)
+ if dircache.has_key(mypath):
+ cacheHit += 1
+ cached_mtime, list, ftype = dircache[mypath]
+ else:
+ cacheMiss += 1
+ cached_mtime, list, ftype = -1, [], []
+ try:
+ pathstat = os.stat(mypath)
+ if stat.S_ISDIR(pathstat[stat.ST_MODE]):
+ mtime = pathstat[stat.ST_MTIME]
+ else:
+ raise Exception
+ except SystemExit, e:
+ raise
+ except:
+ if EmptyOnError:
+ return [], []
+ return None, None
+ if mtime != cached_mtime:
+ if dircache.has_key(mypath):
+ cacheStale += 1
+ list = os.listdir(mypath)
+ ftype = []
+ for x in list:
+ try:
+ if followSymlinks:
+ pathstat = os.stat(mypath+"/"+x)
+ else:
+ pathstat = os.lstat(mypath+"/"+x)
+
+ if stat.S_ISREG(pathstat[stat.ST_MODE]):
+ ftype.append(0)
+ elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
+ ftype.append(1)
+ elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
+ ftype.append(2)
+ else:
+ ftype.append(3)
+ except SystemExit, e:
+ raise
+ except:
+ ftype.append(3)
+ dircache[mypath] = mtime, list, ftype
+
+ ret_list = []
+ ret_ftype = []
+ for x in range(0, len(list)):
+ if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
+ ret_list.append(list[x])
+ ret_ftype.append(ftype[x])
+ elif (list[x] not in ignorelist):
+ ret_list.append(list[x])
+ ret_ftype.append(ftype[x])
+
+ writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
+ return ret_list, ret_ftype
+
+
+def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
+ EmptyOnError=False):
+
+ list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
+
+ if list is None:
+ list=[]
+ if ftype is None:
+ ftype=[]
+
+ if not filesonly and not recursive:
+ return list
+
+ if recursive:
+ x=0
+ while x<len(ftype):
+ if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn')):
+ l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
+ followSymlinks)
+
+ l=l[:]
+ for y in range(0,len(l)):
+ l[y]=list[x]+"/"+l[y]
+ list=list+l
+ ftype=ftype+f
+ x+=1
+ if filesonly:
+ rlist=[]
+ for x in range(0,len(ftype)):
+ if ftype[x]==0:
+ rlist=rlist+[list[x]]
+ else:
+ rlist=list
+
+ return rlist
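+# Illustrative sketch (not in the original source): list only regular
+# files, getting [] instead of an exception if the directory is missing:
+#   for x in listdir("/etc/env.d", filesonly=True, EmptyOnError=True):
+#       ...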
+
+starttime=long(time.time())
+features=[]
+
+def tokenize(mystring):
+ """breaks a string like 'foo? (bar) oni? (blah (blah))'
+ into embedded lists; returns None on paren mismatch"""
+
+ # This function is obsolete.
+ # Use dep_parenreduce instead.
+
+ newtokens=[]
+ curlist=newtokens
+ prevlists=[]
+ level=0
+ accum=""
+ for x in mystring:
+ if x=="(":
+ if accum:
+ curlist.append(accum)
+ accum=""
+ prevlists.append(curlist)
+ curlist=[]
+ level=level+1
+ elif x==")":
+ if accum:
+ curlist.append(accum)
+ accum=""
+ if level==0:
+ writemsg("!!! tokenizer: Unmatched left parenthesis in:\n'"+str(mystring)+"'\n")
+ return None
+ newlist=curlist
+ curlist=prevlists.pop()
+ curlist.append(newlist)
+ level=level-1
+ elif x in string.whitespace:
+ if accum:
+ curlist.append(accum)
+ accum=""
+ else:
+ accum=accum+x
+ if accum:
+ curlist.append(accum)
+ if (level!=0):
+ writemsg("!!! tokenizer: Exiting with unterminated parenthesis in:\n'"+str(mystring)+"'\n")
+ return None
+ return newtokens
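+# Illustrative sketch (not in the original source):
+#   tokenize("foo? ( bar ) oni? ( blah ( blah ) )")
+#     ==> ['foo?', ['bar'], 'oni?', ['blah', ['blah']]]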
+
+def flatten(mytokens):
+ """this function now turns a [1,[2,3]] list into
+ a [1,2,3] list and returns it."""
+ newlist=[]
+ for x in mytokens:
+ if type(x)==types.ListType:
+ newlist.extend(flatten(x))
+ else:
+ newlist.append(x)
+ return newlist
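+# Illustrative sketch (not in the original source):
+#   flatten(['foo?', ['bar'], ['baz', ['qux']]]) ==> ['foo?', 'bar', 'baz', 'qux']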
+
+#beautiful directed graph object
+
+class digraph:
+ def __init__(self):
+ self.dict={}
+ #okeys = keys, in order they were added (to optimize firstzero() ordering)
+ self.okeys=[]
+
+ def addnode(self,mykey,myparent):
+ if not self.dict.has_key(mykey):
+ self.okeys.append(mykey)
+ if myparent==None:
+ self.dict[mykey]=[0,[]]
+ else:
+ self.dict[mykey]=[0,[myparent]]
+ self.dict[myparent][0]=self.dict[myparent][0]+1
+ return
+ if myparent and (not myparent in self.dict[mykey][1]):
+ self.dict[mykey][1].append(myparent)
+ self.dict[myparent][0]=self.dict[myparent][0]+1
+
+ def delnode(self,mykey):
+ if not self.dict.has_key(mykey):
+ return
+ for x in self.dict[mykey][1]:
+ self.dict[x][0]=self.dict[x][0]-1
+ del self.dict[mykey]
+ while 1:
+ try:
+ self.okeys.remove(mykey)
+ except ValueError:
+ break
+
+ def allnodes(self):
+ "returns all nodes in the dictionary"
+ return self.dict.keys()
+
+ def firstzero(self):
+ "returns first node with zero references, or NULL if no such node exists"
+ for x in self.okeys:
+ if self.dict[x][0]==0:
+ return x
+ return None
+
+ def depth(self, mykey):
+ depth=0
+ while (self.dict[mykey][1]):
+ depth=depth+1
+ mykey=self.dict[mykey][1][0]
+ return depth
+
+ def allzeros(self):
+ "returns all nodes with zero references, or NULL if no such node exists"
+ zerolist = []
+ for x in self.dict.keys():
+ mys = string.split(x)
+ if mys[0] != "blocks" and self.dict[x][0]==0:
+ zerolist.append(x)
+ return zerolist
+
+ def hasallzeros(self):
+ "returns 0/1, Are all nodes zeros? 1 : 0"
+ zerolist = []
+ for x in self.dict.keys():
+ if self.dict[x][0]!=0:
+ return 0
+ return 1
+
+ def empty(self):
+ if len(self.dict)==0:
+ return 1
+ return 0
+
+ def hasnode(self,mynode):
+ return self.dict.has_key(mynode)
+
+ def copy(self):
+ mygraph=digraph()
+ for x in self.dict.keys():
+ mygraph.dict[x]=self.dict[x][:]
+ mygraph.okeys=self.okeys[:]
+ return mygraph
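+# Illustrative usage sketch (not in the original source), assuming callers
+# pass addnode(node, dependent) as the merge ordering code does: a node's
+# count is how many registered children it still has, so nodes returned by
+# firstzero() have no outstanding dependencies and are safe to process:
+#   g=digraph()
+#   g.addnode("pkg-b",None)
+#   g.addnode("pkg-a","pkg-b")  # pkg-b waits on pkg-a
+#   g.firstzero()               # ==> "pkg-a"
+#   g.delnode("pkg-a")
+#   g.firstzero()               # ==> "pkg-b"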
+
+# valid end of version components; integers specify offset from release version
+# pre=prerelease, p=patchlevel (should always be followed by an int), rc=release candidate
+# all but _p (where it is required) can be followed by an optional trailing integer
+
+endversion={"pre":-2,"p":0,"alpha":-4,"beta":-3,"rc":-1}
+# as there's no reliable way to set {}.keys() order,
+# endversion_keys will be used instead of endversion.keys()
+# to get a fixed search order, so that "pre" is checked
+# before "p"
+endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
+
+#parse /etc/env.d and generate /etc/profile.env
+
+def env_update(makelinks=1):
+ global root
+ if not os.path.exists(root+"etc/env.d"):
+ prevmask=os.umask(0)
+ os.makedirs(root+"etc/env.d",0755)
+ os.umask(prevmask)
+ fns=listdir(root+"etc/env.d",EmptyOnError=1)
+ fns.sort()
+ pos=0
+ while (pos<len(fns)):
+ if len(fns[pos])<=2:
+ del fns[pos]
+ continue
+ if (fns[pos][0] not in string.digits) or (fns[pos][1] not in string.digits):
+ del fns[pos]
+ continue
+ pos=pos+1
+
+ specials={
+ "KDEDIRS":[],"PATH":[],"CLASSPATH":[],"LDPATH":[],"MANPATH":[],
+ "INFODIR":[],"INFOPATH":[],"ROOTPATH":[],"CONFIG_PROTECT":[],
+ "CONFIG_PROTECT_MASK":[],"PRELINK_PATH":[],"PRELINK_PATH_MASK":[],
+ "PYTHONPATH":[], "ADA_INCLUDE_PATH":[], "ADA_OBJECTS_PATH":[]
+ }
+ colon_separated = [
+ "ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
+ "LDPATH", "MANPATH",
+ "PATH", "PRELINK_PATH",
+ "PRELINK_PATH_MASK", "PYTHON_PATH"
+ ]
+
+ env={}
+
+ for x in fns:
+ # don't process backup files
+ if x[-1]=='~' or x[-4:]==".bak":
+ continue
+ myconfig=getconfig(root+"etc/env.d/"+x)
+ if myconfig==None:
+ writemsg("!!! Parsing error in "+str(root)+"etc/env.d/"+str(x)+"\n")
+ #parse error
+ continue
+ # process PATH, CLASSPATH, LDPATH
+ for myspec in specials.keys():
+ if myconfig.has_key(myspec):
+ if myspec in colon_separated:
+ specials[myspec].extend(string.split(varexpand(myconfig[myspec]),":"))
+ else:
+ specials[myspec].append(varexpand(myconfig[myspec]))
+ del myconfig[myspec]
+ # process all other variables
+ for myenv in myconfig.keys():
+ env[myenv]=varexpand(myconfig[myenv])
+
+ if os.path.exists(root+"etc/ld.so.conf"):
+ myld=open(root+"etc/ld.so.conf")
+ myldlines=myld.readlines()
+ myld.close()
+ oldld=[]
+ for x in myldlines:
+ #each line has at least one char (a newline)
+ if x[0]=="#":
+ continue
+ oldld.append(x[:-1])
+ # os.rename(root+"etc/ld.so.conf",root+"etc/ld.so.conf.bak")
+ # Where is the new ld.so.conf generated? (achim)
+ else:
+ oldld=None
+
+ ld_cache_update=False
+ if os.environ.has_key("PORTAGE_CALLER") and \
+ os.environ["PORTAGE_CALLER"] == "env-update":
+ ld_cache_update = True
+
+ newld=specials["LDPATH"]
+ if (oldld!=newld):
+ #ld.so.conf needs updating and ldconfig needs to be run
+ myfd=open(root+"etc/ld.so.conf","w")
+ myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
+ myfd.write("# contents of /etc/env.d directory\n")
+ for x in specials["LDPATH"]:
+ myfd.write(x+"\n")
+ myfd.close()
+ ld_cache_update=True
+
+ # Update prelink.conf if we are prelink-enabled
+ if prelink_capable:
+ newprelink=open(root+"etc/prelink.conf","w")
+ newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
+ newprelink.write("# contents of /etc/env.d directory\n")
+
+ for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
+ newprelink.write("-l "+x+"\n");
+ for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
+ if not x:
+ continue
+ if x[-1]!='/':
+ x=x+"/"
+ plmasked=0
+ for y in specials["PRELINK_PATH_MASK"]:
+ if not y:
+ continue
+ if y[-1]!='/':
+ y=y+"/"
+ if y==x[0:len(y)]:
+ plmasked=1
+ break
+ if not plmasked:
+ newprelink.write("-h "+x+"\n")
+ for x in specials["PRELINK_PATH_MASK"]:
+ newprelink.write("-b "+x+"\n")
+ newprelink.close()
+
+ if not mtimedb.has_key("ldpath"):
+ mtimedb["ldpath"]={}
+
+ for x in specials["LDPATH"]+['/usr/lib','/lib']:
+ try:
+ newldpathtime=os.stat(x)[stat.ST_MTIME]
+ except SystemExit, e:
+ raise
+ except:
+ newldpathtime=0
+ if mtimedb["ldpath"].has_key(x):
+ if mtimedb["ldpath"][x]==newldpathtime:
+ pass
+ else:
+ mtimedb["ldpath"][x]=newldpathtime
+ ld_cache_update=True
+ else:
+ mtimedb["ldpath"][x]=newldpathtime
+ ld_cache_update=True
+
+ if (ld_cache_update or makelinks):
+ # We can't update links if we haven't cleaned other versions first, as
+ # an older package installed ON TOP of a newer version will cause ldconfig
+ # to overwrite the symlinks we just made. -X means no links. After 'clean'
+ # we can safely create links.
+ writemsg(">>> Regenerating "+str(root)+"etc/ld.so.cache...\n")
+ if makelinks:
+ commands.getstatusoutput("cd / ; /sbin/ldconfig -r "+root)
+ else:
+ commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r "+root)
+
+ del specials["LDPATH"]
+
+ penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
+ penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
+ cenvnotice = penvnotice[:]
+ penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
+ cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
+
+ #create /etc/profile.env for bash support
+ outfile=open(root+"/etc/profile.env","w")
+ outfile.write(penvnotice)
+
+ for path in specials.keys():
+ if len(specials[path])==0:
+ continue
+ outstring="export "+path+"='"
+ if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
+ for x in specials[path][:-1]:
+ outstring += x+" "
+ else:
+ for x in specials[path][:-1]:
+ outstring=outstring+x+":"
+ outstring=outstring+specials[path][-1]+"'"
+ outfile.write(outstring+"\n")
+
+ #create /etc/profile.env
+ for x in env.keys():
+ if type(env[x])!=types.StringType:
+ continue
+ outfile.write("export "+x+"='"+env[x]+"'\n")
+ outfile.close()
+
+ #create /etc/csh.env for (t)csh support
+ outfile=open(root+"/etc/csh.env","w")
+ outfile.write(cenvnotice)
+
+ for path in specials.keys():
+ if len(specials[path])==0:
+ continue
+ outstring="setenv "+path+" '"
+ if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
+ for x in specials[path][:-1]:
+ outstring += x+" "
+ else:
+ for x in specials[path][:-1]:
+ outstring=outstring+x+":"
+ outstring=outstring+specials[path][-1]+"'"
+ outfile.write(outstring+"\n")
+ #get it out of the way
+ del specials[path]
+
+ #create /etc/csh.env
+ for x in env.keys():
+ if type(env[x])!=types.StringType:
+ continue
+ outfile.write("setenv "+x+" '"+env[x]+"'\n")
+ outfile.close()
+ if os.path.exists(DEPSCAN_SH_BINARY):
+ spawn(DEPSCAN_SH_BINARY,settings,free=1)
+
+def new_protect_filename(mydest, newmd5=None):
+ """Resolves a config-protect filename for merging, optionally
+ using the last filename if the md5 matches.
+ (dest,md5) ==> 'string' --- path_to_target_filename
+ (dest) ==> ('next', 'highest') --- next_target and most-recent_target
+ """
+
+ # config protection filename format:
+ # ._cfg0000_foo
+ # 0123456789012
+ prot_num=-1
+ last_pfile=""
+
+ if (len(mydest) == 0):
+ raise ValueError, "Empty path provided where a filename is required"
+ if (mydest[-1]=="/"): # XXX add better directory checking
+ raise ValueError, "Directory provided but this function requires a filename"
+ if not os.path.exists(mydest):
+ return mydest
+
+ real_filename = os.path.basename(mydest)
+ real_dirname = os.path.dirname(mydest)
+ for pfile in listdir(real_dirname):
+ if pfile[0:5] != "._cfg":
+ continue
+ if pfile[10:] != real_filename:
+ continue
+ try:
+ new_prot_num = int(pfile[5:9])
+ if new_prot_num > prot_num:
+ prot_num = new_prot_num
+ last_pfile = pfile
+ except SystemExit, e:
+ raise
+ except:
+ continue
+ prot_num = prot_num + 1
+
+ new_pfile = os.path.normpath(real_dirname+"/._cfg"+string.zfill(prot_num,4)+"_"+real_filename)
+ old_pfile = os.path.normpath(real_dirname+"/"+last_pfile)
+ if last_pfile and newmd5:
+ if portage_checksum.perform_md5(real_dirname+"/"+last_pfile) == newmd5:
+ return old_pfile
+ else:
+ return new_pfile
+ elif newmd5:
+ return new_pfile
+ else:
+ return (new_pfile, old_pfile)
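+# Illustrative sketch (not in the original source): if /etc/foo exists and
+# an earlier merge left ._cfg0000_foo behind, a call without an md5 returns
+# the next free slot plus the most recent existing one:
+#   new_protect_filename("/etc/foo") ==> ("/etc/._cfg0001_foo", "/etc/._cfg0000_foo")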
+
+#XXX: These two are now implemented in portage_util.py but are needed here
+#XXX: until the isvalidatom() dependency is sorted out.
+
+def grabdict_package(myfilename,juststrings=0):
+ pkgs=grabdict(myfilename, juststrings=juststrings, empty=1)
+ for x in pkgs.keys():
+ if not isvalidatom(x):
+ del(pkgs[x])
+ writemsg("--- Invalid atom in %s: %s\n" % (myfilename, x))
+ return pkgs
+
+def grabfile_package(myfilename,compatlevel=0):
+ pkgs=grabfile(myfilename,compatlevel)
+ for x in range(len(pkgs)-1,-1,-1):
+ pkg = pkgs[x]
+ if pkg[0] == "-":
+ pkg = pkg[1:]
+ if pkg[0] == "*":
+ pkg = pkg[1:]
+ if not isvalidatom(pkg):
+ writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[x]))
+ del(pkgs[x])
+ return pkgs
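+# Illustrative note (not in the original source): profile "packages" lines
+# may carry a leading "-" (remove an inherited entry) and/or "*" (system
+# set marker), e.g. "*>=sys-apps/portage-2.0"; both are stripped here only
+# for the purpose of atom validation.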
+
+# returns a tuple. (version[string], error[string])
+# They are pretty much mutually exclusive.
+# Either version is a string and error is None, or
+# version is None and error is a string.
+#
+def ExtractKernelVersion(base_dir):
+ lines = []
+ pathname = os.path.join(base_dir, 'Makefile')
+ try:
+ f = open(pathname, 'r')
+ except OSError, details:
+ return (None, str(details))
+ except IOError, details:
+ return (None, str(details))
+
+ try:
+ for i in range(4):
+ lines.append(f.readline())
+ except OSError, details:
+ return (None, str(details))
+ except IOError, details:
+ return (None, str(details))
+
+ lines = map(string.strip, lines)
+
+ version = ''
+
+ #XXX: The following code relies on the ordering of vars within the Makefile
+ for line in lines:
+ # split on the '=' then remove annoying whitespace
+ items = string.split(line, '=')
+ items = map(string.strip, items)
+ if items[0] == 'VERSION' or \
+ items[0] == 'PATCHLEVEL':
+ version += items[1]
+ version += "."
+ elif items[0] == 'SUBLEVEL':
+ version += items[1]
+ elif items[0] == 'EXTRAVERSION' and \
+ items[-1] != items[0]:
+ version += items[1]
+
+ # Grab a list of files named localversion* and sort them
+ localversions = os.listdir(base_dir)
+ for x in range(len(localversions)-1,-1,-1):
+ if localversions[x][:12] != "localversion":
+ del localversions[x]
+ localversions.sort()
+
+ # Append the contents of each to the version string, stripping ALL whitespace
+ for lv in localversions:
+ version += string.join(string.split(string.join(grabfile(base_dir+"/"+lv))), "")
+
+ # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
+ kernelconfig = getconfig(base_dir+"/.config")
+ if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
+ version += string.join(string.split(kernelconfig["CONFIG_LOCALVERSION"]), "")
+
+ return (version,None)
+
+
+autouse_val = None
+def autouse(myvartree,use_cache=1):
+ "returns set of USE variables auto-enabled due to packages being installed"
+ global usedefaults, autouse_val
+ if autouse_val is not None:
+ return autouse_val
+ if profiledir==None:
+ autouse_val = ""
+ return ""
+ myusevars=""
+ for myuse in usedefaults:
+ dep_met = True
+ for mydep in usedefaults[myuse]:
+ if not myvartree.dep_match(mydep,use_cache=True):
+ dep_met = False
+ break
+ if dep_met:
+ myusevars += " "+myuse
+ autouse_val = myusevars
+ return myusevars
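+# Illustrative note (not in the original source): use.defaults maps a USE
+# flag to the atoms that trigger it; e.g. a profile line such as
+#   gnome   gnome-base/gnome
+# would auto-enable "gnome" once gnome-base/gnome is installed.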
+
+def check_config_instance(test):
+ if not test or (str(test.__class__) != 'portage.config'):
+ raise TypeError, "Invalid type for config object: %s" % test.__class__
+
+class config:
+ def __init__(self, clone=None, mycpv=None, config_profile_path=None, config_incrementals=None):
+
+ self.already_in_regenerate = 0
+
+ self.locked = 0
+ self.mycpv = None
+ self.puse = []
+ self.modifiedkeys = []
+
+ self.virtuals = {}
+ self.v_count = 0
+
+ # Virtuals obtained from the vartree
+ self.treeVirtuals = {}
+ # Virtuals by user specification. Includes negatives.
+ self.userVirtuals = {}
+ # Virtual negatives from user specifications.
+ self.negVirtuals = {}
+
+ self.user_profile_dir = None
+
+ if clone:
+ self.incrementals = copy.deepcopy(clone.incrementals)
+ self.profile_path = copy.deepcopy(clone.profile_path)
+ self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
+
+ self.module_priority = copy.deepcopy(clone.module_priority)
+ self.modules = copy.deepcopy(clone.modules)
+
+ self.depcachedir = copy.deepcopy(clone.depcachedir)
+
+ self.packages = copy.deepcopy(clone.packages)
+ self.virtuals = copy.deepcopy(clone.virtuals)
+
+ self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
+ self.userVirtuals = copy.deepcopy(clone.userVirtuals)
+ self.negVirtuals = copy.deepcopy(clone.negVirtuals)
+
+ self.use_defs = copy.deepcopy(clone.use_defs)
+ self.usemask = copy.deepcopy(clone.usemask)
+
+ self.configlist = copy.deepcopy(clone.configlist)
+ self.configlist[-1] = os.environ.copy()
+ self.configdict = { "globals": self.configlist[0],
+ "defaults": self.configlist[1],
+ "conf": self.configlist[2],
+ "pkg": self.configlist[3],
+ "auto": self.configlist[4],
+ "backupenv": self.configlist[5],
+ "env": self.configlist[6] }
+ self.profiles = copy.deepcopy(clone.profiles)
+ self.backupenv = copy.deepcopy(clone.backupenv)
+ self.pusedict = copy.deepcopy(clone.pusedict)
+ self.categories = copy.deepcopy(clone.categories)
+ self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
+ self.pmaskdict = copy.deepcopy(clone.pmaskdict)
+ self.punmaskdict = copy.deepcopy(clone.punmaskdict)
+ self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
+ self.pprovideddict = copy.deepcopy(clone.pprovideddict)
+ self.lookuplist = copy.deepcopy(clone.lookuplist)
+ self.uvlist = copy.deepcopy(clone.uvlist)
+ self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
+ self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
+ else:
+ self.depcachedir = DEPCACHE_PATH
+
+ if not config_profile_path:
+ global profiledir
+ writemsg("config_profile_path not specified to class config\n")
+ self.profile_path = profiledir[:]
+ else:
+ self.profile_path = config_profile_path[:]
+
+ if not config_incrementals:
+ writemsg("incrementals not specified to class config\n")
+ self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
+ else:
+ self.incrementals = copy.deepcopy(config_incrementals)
+
+ self.module_priority = ["user","default"]
+ self.modules = {}
+ self.modules["user"] = getconfig(MODULES_FILE_PATH)
+ if self.modules["user"] == None:
+ self.modules["user"] = {}
+ self.modules["default"] = {
+ "portdbapi.metadbmodule": "portage_db_flat.database",
+ "portdbapi.auxdbmodule": "portage_db_flat.database",
+ "eclass_cache.dbmodule": "portage_db_cpickle.database",
+ }
+
+ self.usemask=[]
+ self.configlist=[]
+ self.backupenv={}
+ # back up our incremental variables:
+ self.configdict={}
+ # configlist will contain: [ globals, defaults, conf, pkg, auto, backupenv (incrementals), origenv ]
+
+ # The symlink might not exist or might not be a symlink.
+ try:
+ self.profiles=[abssymlink(self.profile_path)]
+ except SystemExit, e:
+ raise
+ except:
+ self.profiles=[self.profile_path]
+
+ mypath = self.profiles[0]
+ while os.path.exists(mypath+"/parent"):
+ mypath = os.path.normpath(mypath+"///"+grabfile(mypath+"/parent")[0])
+ if os.path.exists(mypath):
+ self.profiles.insert(0,mypath)
+
+ if os.environ.has_key("PORTAGE_CALLER") and os.environ["PORTAGE_CALLER"] == "repoman":
+ pass
+ else:
+ # XXX: This should depend on ROOT?
+ if os.path.exists("/"+CUSTOM_PROFILE_PATH):
+ self.user_profile_dir = os.path.normpath("/"+"///"+CUSTOM_PROFILE_PATH)
+ self.profiles.append(self.user_profile_dir[:])
+
+ self.packages_list = grab_multiple("packages", self.profiles, grabfile_package)
+ self.packages = stack_lists(self.packages_list, incremental=1)
+ del self.packages_list
+ #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
+
+   # prevmaskdict
+ self.prevmaskdict={}
+ for x in self.packages:
+ mycatpkg=dep_getkey(x)
+ if not self.prevmaskdict.has_key(mycatpkg):
+ self.prevmaskdict[mycatpkg]=[x]
+ else:
+ self.prevmaskdict[mycatpkg].append(x)
+
+ # get profile-masked use flags -- INCREMENTAL Child over parent
+ usemask_lists = grab_multiple("use.mask", self.profiles, grabfile)
+ self.usemask = stack_lists(usemask_lists, incremental=True)
+ del usemask_lists
+ use_defs_lists = grab_multiple("use.defaults", self.profiles, grabdict)
+ self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
+ del use_defs_lists
+
+ try:
+ mygcfg_dlists = grab_multiple("make.globals", self.profiles+["/etc"], getconfig)
+ self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
+
+ if self.mygcfg == None:
+ self.mygcfg = {}
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! %s\n" % (e))
+ writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
+ writemsg("!!! Errors in this file should be reported on bugs.gentoo.org.\n")
+ sys.exit(1)
+ self.configlist.append(self.mygcfg)
+ self.configdict["globals"]=self.configlist[-1]
+
+ self.mygcfg = {}
+ if self.profiles:
+ try:
+ mygcfg_dlists = grab_multiple("make.defaults", self.profiles, getconfig)
+ self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
+ #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
+ if self.mygcfg == None:
+ self.mygcfg = {}
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! %s\n" % (e))
+ writemsg("!!! 'rm -Rf /usr/portage/profiles; emerge sync' may fix this. If it does\n")
+ writemsg("!!! not then please report this to bugs.gentoo.org and, if possible, a dev\n")
+ writemsg("!!! on #gentoo (irc.freenode.org)\n")
+ sys.exit(1)
+ self.configlist.append(self.mygcfg)
+ self.configdict["defaults"]=self.configlist[-1]
+
+ try:
+ # XXX: Should depend on root?
+ self.mygcfg=getconfig("/"+MAKE_CONF_FILE,allow_sourcing=True)
+ if self.mygcfg == None:
+ self.mygcfg = {}
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! %s\n" % (e))
+ writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
+ sys.exit(1)
+
+
+ self.configlist.append(self.mygcfg)
+ self.configdict["conf"]=self.configlist[-1]
+
+ self.configlist.append({})
+ self.configdict["pkg"]=self.configlist[-1]
+
+ #auto-use:
+ self.configlist.append({})
+ self.configdict["auto"]=self.configlist[-1]
+
+ #backup-env (for recording our calculated incremental variables:)
+ self.backupenv = os.environ.copy()
+ self.configlist.append(self.backupenv) # XXX Why though?
+ self.configdict["backupenv"]=self.configlist[-1]
+
+ self.configlist.append(os.environ.copy())
+ self.configdict["env"]=self.configlist[-1]
+
+
+ # make lookuplist for loading package.*
+ self.lookuplist=self.configlist[:]
+ self.lookuplist.reverse()
+
+ archlist = grabfile(self["PORTDIR"]+"/profiles/arch.list")
+ self.configdict["conf"]["PORTAGE_ARCHLIST"] = string.join(archlist)
+
+ if os.environ.get("PORTAGE_CALLER","") == "repoman":
+ # repoman shouldn't use local settings.
+ locations = [self["PORTDIR"] + "/profiles"]
+ self.pusedict = {}
+ self.pkeywordsdict = {}
+ self.punmaskdict = {}
+ else:
+ locations = [self["PORTDIR"] + "/profiles", USER_CONFIG_PATH]
+
+ pusedict=grabdict_package(USER_CONFIG_PATH+"/package.use")
+ self.pusedict = {}
+ for key in pusedict.keys():
+ cp = dep_getkey(key)
+ if not self.pusedict.has_key(cp):
+ self.pusedict[cp] = {}
+ self.pusedict[cp][key] = pusedict[key]
+
+ #package.keywords
+ pkgdict=grabdict_package(USER_CONFIG_PATH+"/package.keywords")
+ self.pkeywordsdict = {}
+ for key in pkgdict.keys():
+ # default to ~arch if no specific keyword is given
+ if not pkgdict[key]:
+ mykeywordlist = []
+ if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
+ groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
+ else:
+ groups = []
+ for keyword in groups:
+ if not keyword[0] in "~-":
+ mykeywordlist.append("~"+keyword)
+ pkgdict[key] = mykeywordlist
+ cp = dep_getkey(key)
+ if not self.pkeywordsdict.has_key(cp):
+ self.pkeywordsdict[cp] = {}
+ self.pkeywordsdict[cp][key] = pkgdict[key]
+
+ #package.unmask
+ pkgunmasklines = grabfile_package(USER_CONFIG_PATH+"/package.unmask")
+ self.punmaskdict = {}
+ for x in pkgunmasklines:
+ mycatpkg=dep_getkey(x)
+ if self.punmaskdict.has_key(mycatpkg):
+ self.punmaskdict[mycatpkg].append(x)
+ else:
+ self.punmaskdict[mycatpkg]=[x]
+
+ #getting categories from an external file now
+ categories = grab_multiple("categories", locations, grabfile)
+ self.categories = stack_lists(categories, incremental=1)
+ del categories
+
+ # get virtuals -- needs categories
+ self.loadVirtuals('/')
+
+ #package.mask
+ pkgmasklines = grab_multiple("package.mask", self.profiles + locations, grabfile_package)
+ pkgmasklines = stack_lists(pkgmasklines, incremental=1)
+
+ self.pmaskdict = {}
+ for x in pkgmasklines:
+ mycatpkg=dep_getkey(x)
+ if self.pmaskdict.has_key(mycatpkg):
+ self.pmaskdict[mycatpkg].append(x)
+ else:
+ self.pmaskdict[mycatpkg]=[x]
+
+ pkgprovidedlines = grab_multiple("package.provided", self.profiles, grabfile)
+ pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
+ for x in range(len(pkgprovidedlines)-1, -1, -1):
+ cpvr = catpkgsplit(pkgprovidedlines[x])
+ if not cpvr or cpvr[0] == "null":
+ writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n")
+ del pkgprovidedlines[x]
+
+ self.pprovideddict = {}
+ for x in pkgprovidedlines:
+ cpv=catpkgsplit(x)
+    if not cpv:
+ continue
+ mycatpkg=dep_getkey(x)
+ if self.pprovideddict.has_key(mycatpkg):
+ self.pprovideddict[mycatpkg].append(x)
+ else:
+ self.pprovideddict[mycatpkg]=[x]
+
+ self.lookuplist=self.configlist[:]
+ self.lookuplist.reverse()
+
+ useorder=self["USE_ORDER"]
+ if not useorder:
+ # reasonable defaults; this is important as without USE_ORDER,
+ # USE will always be "" (nothing set)!
+ useorder="env:pkg:conf:auto:defaults"
+ useordersplit=useorder.split(":")
+
+ self.uvlist=[]
+ for x in useordersplit:
+ if self.configdict.has_key(x):
+ if "PKGUSE" in self.configdict[x].keys():
+ del self.configdict[x]["PKGUSE"] # Delete PkgUse, Not legal to set.
+ #prepend db to list to get correct order
+ self.uvlist[0:0]=[self.configdict[x]]
+
+ self.configdict["env"]["PORTAGE_GID"]=str(portage_gid)
+ self.backupenv["PORTAGE_GID"]=str(portage_gid)
+
+ if self.has_key("PORT_LOGDIR") and not self["PORT_LOGDIR"]:
+    # PORT_LOGDIR is defined but empty. This causes a traceback in doebuild.
+    writemsg(yellow("!!!")+" PORT_LOGDIR was defined, but set to nothing.\n")
+    writemsg(yellow("!!!")+" Disabling it. Please set it to a non-null value.\n")
+ del self["PORT_LOGDIR"]
+
+ if self["PORTAGE_CACHEDIR"]:
+ # XXX: Deprecated -- April 15 -- NJ
+ writemsg(yellow(">>> PORTAGE_CACHEDIR has been deprecated!")+"\n")
+ writemsg(">>> Please use PORTAGE_DEPCACHEDIR instead.\n")
+ self.depcachedir = self["PORTAGE_CACHEDIR"]
+ del self["PORTAGE_CACHEDIR"]
+
+ if self["PORTAGE_DEPCACHEDIR"]:
+ #the auxcache is the only /var/cache/edb/ entry that stays at / even when "root" changes.
+ # XXX: Could move with a CHROOT functionality addition.
+ self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
+ del self["PORTAGE_DEPCACHEDIR"]
+
+ overlays = string.split(self["PORTDIR_OVERLAY"])
+ if overlays:
+ new_ov=[]
+ for ov in overlays:
+ ov=os.path.normpath(ov)
+ if os.path.isdir(ov):
+ new_ov.append(ov)
+ else:
+ writemsg(red("!!! Invalid PORTDIR_OVERLAY (not a dir): "+ov+"\n"))
+ self["PORTDIR_OVERLAY"] = string.join(new_ov)
+ self.backup_changes("PORTDIR_OVERLAY")
+
+ self.regenerate()
+
+ self.features = portage_util.unique_array(self["FEATURES"].split())
+
+ #XXX: Should this be temporary? Is it possible at all to have a default?
+ if "gpg" in self.features:
+ if not os.path.exists(self["PORTAGE_GPG_DIR"]) or not os.path.isdir(self["PORTAGE_GPG_DIR"]):
+ writemsg("PORTAGE_GPG_DIR is invalid. Removing gpg from FEATURES.\n")
+ self.features.remove("gpg")
+
+ if "maketest" in self.features and "test" not in self.features:
+ self.features.append("test")
+
+ if not portage_exec.sandbox_capable and ("sandbox" in self.features or "usersandbox" in self.features):
+ writemsg(red("!!! Problem with sandbox binary. Disabling...\n\n"))
+ if "sandbox" in self.features:
+ self.features.remove("sandbox")
+ if "usersandbox" in self.features:
+ self.features.remove("usersandbox")
+
+ self.features.sort()
+ self["FEATURES"] = " ".join(["-*"]+self.features)
+ self.backup_changes("FEATURES")
+
+ if not len(self["CBUILD"]):
+ self["CBUILD"] = self["CHOST"]
+ self.backup_changes("CBUILD")
+
+ if mycpv:
+ self.setcpv(mycpv)
+
+ def loadVirtuals(self,root):
+ self.virtuals = self.getvirtuals(root)
+
+ def load_best_module(self,property_string):
+ best_mod = best_from_dict(property_string,self.modules,self.module_priority)
+ return load_mod(best_mod)
+
+ def lock(self):
+ self.locked = 1
+
+ def unlock(self):
+ self.locked = 0
+
+ def modifying(self):
+ if self.locked:
+ raise Exception, "Configuration is locked."
+
+ def backup_changes(self,key=None):
+ if key and self.configdict["env"].has_key(key):
+ self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
+ else:
+ raise KeyError, "No such key defined in environment: %s" % key
+
+ def reset(self,keeping_pkg=0,use_cache=1):
+ "reset environment to original settings"
+ for x in self.configlist[-1].keys():
+ if x not in self.backupenv.keys():
+ del self.configlist[-1][x]
+
+ self.configdict["env"].update(self.backupenv)
+
+ self.modifiedkeys = []
+ if not keeping_pkg:
+ self.puse = ""
+ self.configdict["pkg"].clear()
+ self.regenerate(use_cache=use_cache)
+
+ def load_infodir(self,infodir):
+ if self.configdict.has_key("pkg"):
+ for x in self.configdict["pkg"].keys():
+ del self.configdict["pkg"][x]
+ else:
+ writemsg("No pkg setup for settings instance?\n")
+ sys.exit(17)
+
+ if os.path.exists(infodir):
+ if os.path.exists(infodir+"/environment"):
+ self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
+
+ myre = re.compile('^[A-Z]+$')
+ for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
+ if myre.match(filename):
+ try:
+ mydata = string.strip(open(infodir+"/"+filename).read())
+ if len(mydata)<2048:
+ if filename == "USE":
+ self.configdict["pkg"][filename] = "-* "+mydata
+ else:
+ self.configdict["pkg"][filename] = mydata
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename)
+ pass
+ return 1
+ return 0
+
+ def setcpv(self,mycpv,use_cache=1):
+ self.modifying()
+ self.mycpv = mycpv
+ cp = dep_getkey(mycpv)
+ newpuse = ""
+ if self.pusedict.has_key(cp):
+ self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys())
+ if self.pusekey:
+ newpuse = string.join(self.pusedict[cp][self.pusekey])
+ if newpuse == self.puse:
+ return
+ self.puse = newpuse
+ self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
+ self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
+ self.reset(keeping_pkg=1,use_cache=use_cache)
+
+ def setinst(self,mycpv,mydbapi):
+ # Grab the virtuals this package provides and add them into the tree virtuals.
+ provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
+ if isinstance(mydbapi, portdbapi):
+ myuse = self["USE"]
+ else:
+ myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
+ virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
+
+ cp = dep_getkey(mycpv)
+ for virt in virts:
+ virt = dep_getkey(virt)
+ if not self.treeVirtuals.has_key(virt):
+ self.treeVirtuals[virt] = []
+ # XXX: Is this bad? -- It's a permanent modification
+ self.treeVirtuals[virt] = portage_util.unique_array(self.treeVirtuals[virt]+[cp])
+
+ self.virtuals = self.__getvirtuals_compile()
+
+
+ def regenerate(self,useonly=0,use_cache=1):
+ global usesplit,profiledir
+
+ if self.already_in_regenerate:
+ # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
+ writemsg("!!! Looping in regenerate.\n",1)
+ return
+ else:
+ self.already_in_regenerate = 1
+
+ if useonly:
+ myincrementals=["USE"]
+ else:
+ myincrementals=portage_const.INCREMENTALS
+ for mykey in myincrementals:
+ if mykey=="USE":
+ mydbs=self.uvlist
+ # XXX Global usage of db... Needs to go away somehow.
+ if db.has_key(root) and db[root].has_key("vartree"):
+ self.configdict["auto"]["USE"]=autouse(db[root]["vartree"],use_cache=use_cache)
+ else:
+ self.configdict["auto"]["USE"]=""
+ else:
+ mydbs=self.configlist[:-1]
+
+ myflags=[]
+ for curdb in mydbs:
+ if not curdb.has_key(mykey):
+ continue
+ #variables are already expanded
+ mysplit=curdb[mykey].split()
+
+ for x in mysplit:
+ if x=="-*":
+ # "-*" is a special "minus" var that means "unset all settings".
+ # so USE="-* gnome" will have *just* gnome enabled.
+ myflags=[]
+ continue
+
+ if x[0]=="+":
+ # Not legal. People assume too much. Complain.
+ writemsg(red("USE flags should not start with a '+': %s\n" % x))
+ x=x[1:]
+
+ if (x[0]=="-"):
+ if (x[1:] in myflags):
+ # Unset/Remove it.
+ del myflags[myflags.index(x[1:])]
+ continue
+
+ # We got here, so add it now.
+ if x not in myflags:
+ myflags.append(x)
+
+ myflags.sort()
+ #store setting in last element of configlist, the original environment:
+ self.configlist[-1][mykey]=string.join(myflags," ")
+ del myflags
+
+ #cache split-up USE var in a global
+ usesplit=[]
+
+ for x in string.split(self.configlist[-1]["USE"]):
+ if x not in self.usemask:
+ usesplit.append(x)
+
+ if self.has_key("USE_EXPAND"):
+ for var in string.split(self["USE_EXPAND"]):
+ if self.has_key(var):
+ for x in string.split(self[var]):
+ mystr = string.lower(var)+"_"+x
+ if mystr not in usesplit:
+ usesplit.append(mystr)
+
+  # Prepend the ARCH variable to USE settings so '-*' in env doesn't kill arch.
+ if self.configdict["defaults"].has_key("ARCH"):
+ if self.configdict["defaults"]["ARCH"]:
+ if self.configdict["defaults"]["ARCH"] not in usesplit:
+ usesplit.insert(0,self.configdict["defaults"]["ARCH"])
+
+ self.configlist[-1]["USE"]=string.join(usesplit," ")
+
+ self.already_in_regenerate = 0
+
+ def getvirtuals(self, myroot):
+ myvirts = {}
+
+ # This breaks catalyst/portage when setting to a fresh/empty root.
+ # Virtuals cannot be calculated because there is nothing to work
+ # from. So the only ROOT prefixed dir should be local configs.
+ #myvirtdirs = prefix_array(self.profiles,myroot+"/")
+ myvirtdirs = copy.deepcopy(self.profiles)
+ while self.user_profile_dir in myvirtdirs:
+ myvirtdirs.remove(self.user_profile_dir)
+
+
+ # Rules
+ # R1: Collapse profile virtuals
+ # R2: Extract user-negatives.
+ # R3: Collapse user-virtuals.
+ # R4: Apply user negatives to all except user settings.
+
+  # Order of preference (highest first):
+  # 1. user-declared that are installed
+  # 2. installed and in profile
+  # 3. installed
+  # 4. user-declared set
+  # 5. profile
+
+ self.dirVirtuals = grab_multiple("virtuals", myvirtdirs, grabdict)
+ self.dirVirtuals.reverse()
+
+ if self.user_profile_dir and os.path.exists(self.user_profile_dir+"/virtuals"):
+ self.userVirtuals = grabdict(self.user_profile_dir+"/virtuals")
+
+ # Store all the negatives for later.
+ for x in self.userVirtuals.keys():
+ self.negVirtuals[x] = []
+ for y in self.userVirtuals[x]:
+ if y[0] == '-':
+ self.negVirtuals[x].append(y[:])
+
+ # Collapse the user virtuals so that we don't deal with negatives.
+ self.userVirtuals = stack_dictlist([self.userVirtuals],incremental=1)
+
+ # Collapse all the profile virtuals including user negations.
+ self.dirVirtuals = stack_dictlist([self.negVirtuals]+self.dirVirtuals,incremental=1)
+
+ # Repoman does not use user or tree virtuals.
+ if os.environ.get("PORTAGE_CALLER","") != "repoman":
+ # XXX: vartree does not use virtuals, does user set matter?
+ temp_vartree = vartree(myroot,self.dirVirtuals,categories=self.categories)
+ # Reduce the provides into a list by CP.
+ self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
+
+ return self.__getvirtuals_compile()
+
+ def __getvirtuals_compile(self):
+		"""Actually generate the virtuals we have collected.
+		Each list is reversed so its order matches the rest of portage:
+		data given as [Best, Better, Good] comes back as [Good, Better, Best]."""
+
+ # Virtuals by profile+tree preferences.
+ ptVirtuals = {}
+ # Virtuals by user+tree preferences.
+ utVirtuals = {}
+
+		# If a user virtual is already installed, we prefer it.
+ for x in self.userVirtuals.keys():
+ utVirtuals[x] = []
+ if self.treeVirtuals.has_key(x):
+ for y in self.userVirtuals[x]:
+ if y in self.treeVirtuals[x]:
+ utVirtuals[x].append(y)
+ #print "F:",utVirtuals
+ #utVirtuals[x].reverse()
+ #print "R:",utVirtuals
+
+		# If a profile virtual is already installed, we prefer it.
+ for x in self.dirVirtuals.keys():
+ ptVirtuals[x] = []
+ if self.treeVirtuals.has_key(x):
+ for y in self.dirVirtuals[x]:
+ if y in self.treeVirtuals[x]:
+ ptVirtuals[x].append(y)
+
+ # UserInstalled, ProfileInstalled, Installed, User, Profile
+ biglist = [utVirtuals, ptVirtuals, self.treeVirtuals,
+ self.userVirtuals, self.dirVirtuals]
+
+ # We reverse each dictlist so that the order matches everything
+ # else in portage. [-*, a, b] [b, c, d] ==> [b, a]
+ for dictlist in biglist:
+ for key in dictlist:
+ dictlist[key].reverse()
+
+ # User settings and profile settings take precedence over tree.
+ val = stack_dictlist(biglist,incremental=1)
+
+ return val
+
+ def __delitem__(self,mykey):
+ for x in self.lookuplist:
+ if x != None:
+ if mykey in x:
+ del x[mykey]
+
+ def __getitem__(self,mykey):
+ match = ''
+ for x in self.lookuplist:
+ if x == None:
+ writemsg("!!! lookuplist is null.\n")
+ elif x.has_key(mykey):
+ match = x[mykey]
+ break
+
+ if 0 and match and mykey in ["PORTAGE_BINHOST"]:
+ # These require HTTP Encoding
+ try:
+ import urllib
+ if urllib.unquote(match) != match:
+ writemsg("Note: %s already contains escape codes." % (mykey))
+ else:
+ match = urllib.quote(match)
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("Failed to fix %s using urllib, attempting to continue.\n" % (mykey))
+ pass
+
+ elif mykey == "CONFIG_PROTECT_MASK":
+ match += " /etc/env.d"
+
+ return match
+
+ def has_key(self,mykey):
+ for x in self.lookuplist:
+ if x.has_key(mykey):
+ return 1
+ return 0
+
+ def keys(self):
+ mykeys=[]
+ for x in self.lookuplist:
+ for y in x.keys():
+ if y not in mykeys:
+ mykeys.append(y)
+ return mykeys
+
+ def __setitem__(self,mykey,myvalue):
+ "set a value; will be thrown away at reset() time"
+ if type(myvalue) != types.StringType:
+ raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
+ self.modifying()
+ self.modifiedkeys += [mykey]
+ self.configdict["env"][mykey]=myvalue
+
+ def environ(self):
+ "return our locally-maintained environment"
+ mydict={}
+ for x in self.keys():
+ mydict[x]=self[x]
+ if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
+ writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
+ mydict["HOME"]=mydict["BUILD_PREFIX"][:]
+
+ return mydict
+
+
+# XXX This would be to replace getstatusoutput completely.
+# XXX Issue: cannot block execution. Deadlock condition.
+def spawn(mystring,mysettings,debug=0,free=0,droppriv=0,fd_pipes=None,**keywords):
+ """spawn a subprocess with optional sandbox protection,
+ depending on whether sandbox is enabled. The "free" argument,
+ when set to 1, will disable sandboxing. This allows us to
+ spawn processes that are supposed to modify files outside of the
+ sandbox. We can't use os.system anymore because it messes up
+ signal handling. Using spawn allows our Portage signal handler
+ to work."""
+
+ if type(mysettings) == types.DictType:
+ env=mysettings
+ keywords["opt_name"]="[ %s ]" % "portage"
+ else:
+ check_config_instance(mysettings)
+ env=mysettings.environ()
+ keywords["opt_name"]="[%s]" % mysettings["PF"]
+
+ # XXX: Negative RESTRICT word
+ droppriv=(droppriv and ("userpriv" in features) and not \
+ (("nouserpriv" in string.split(mysettings["RESTRICT"])) or \
+ ("userpriv" in string.split(mysettings["RESTRICT"]))))
+
+
+ if ("sandbox" in features) and (not free):
+ keywords["opt_name"] += " sandbox"
+ if droppriv and portage_gid and portage_uid:
+ keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":[portage_gid],"umask":002})
+ return portage_exec.spawn_sandbox(mystring,env=env,**keywords)
+ else:
+ keywords["opt_name"] += " bash"
+ return portage_exec.spawn_bash(mystring,env=env,**keywords)
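+# Illustrative sketch (not part of the original source): typical spawn()
+# invocations. The command strings are hypothetical; mysettings is a
+# config instance as described above.
+def _spawn_example(mysettings):
+	# Sandboxed, dropping privileges to the portage user where allowed:
+	ret = spawn("emake DESTDIR=${D} install", mysettings, droppriv=1)
+	# Sandbox disabled (free=1), e.g. for commands that must write
+	# outside the sandbox, such as fetching into DISTDIR:
+	ret = spawn("/usr/bin/wget -P ${DISTDIR} ${URI}", mysettings, free=1)
+	return ret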
+
+
+
+def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
+ "fetch files. Will use digest file if available."
+
+	# 'nomirror' is bad/negative logic: you restrict mirroring; you don't enable no-mirroring.
+ if ("mirror" in mysettings["RESTRICT"].split()) or \
+ ("nomirror" in mysettings["RESTRICT"].split()):
+ if ("mirror" in features) and ("lmirror" not in features):
+ # lmirror should allow you to bypass mirror restrictions.
+ # XXX: This is not a good thing, and is temporary at best.
+ print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
+ return 1
+
+ global thirdpartymirrors
+
+ check_config_instance(mysettings)
+
+ custommirrors=grabdict(CUSTOM_MIRRORS_FILE)
+
+ mymirrors=[]
+
+ if listonly or ("distlocks" not in features):
+ use_locks = 0
+
+ fetch_to_ro = 0
+ if "skiprocheck" in features:
+ fetch_to_ro = 1
+
+ if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
+ if use_locks:
+			writemsg(red("!!! You are fetching to a read-only filesystem; you should turn locking off.\n"))
+			writemsg("!!! This can be done by adding -distlocks to FEATURES in /etc/make.conf\n")
+# use_locks = 0
+
+ # local mirrors are always added
+ if custommirrors.has_key("local"):
+ mymirrors += custommirrors["local"]
+
+ if ("nomirror" in mysettings["RESTRICT"].split()) or \
+ ("mirror" in mysettings["RESTRICT"].split()):
+ # We don't add any mirrors.
+ pass
+ else:
+ if try_mirrors:
+ for x in mysettings["GENTOO_MIRRORS"].split():
+ if x:
+ if x[-1] == '/':
+ mymirrors += [x[:-1]]
+ else:
+ mymirrors += [x]
+
+ mydigests = {}
+ digestfn = mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
+ if os.path.exists(digestfn):
+ mydigests = digestParseFile(digestfn)
+
+ fsmirrors = []
+ for x in range(len(mymirrors)-1,-1,-1):
+ if mymirrors[x] and mymirrors[x][0]=='/':
+ fsmirrors += [mymirrors[x]]
+ del mymirrors[x]
+
+ for myuri in myuris:
+ myfile=os.path.basename(myuri)
+ try:
+ destdir = mysettings["DISTDIR"]+"/"
+ if not os.path.exists(destdir+myfile):
+ for mydir in fsmirrors:
+ if os.path.exists(mydir+"/"+myfile):
+						writemsg(_("Local mirror has file: %(file)s\n") % {"file":myfile})
+ shutil.copyfile(mydir+"/"+myfile,destdir+"/"+myfile)
+ break
+		except (OSError,IOError),e:
+			# copying from the local mirror failed; fall through to a normal fetch
+			writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
+
+ if "fetch" in mysettings["RESTRICT"].split():
+ # fetch is restricted. Ensure all files have already been downloaded; otherwise,
+ # print message and exit.
+ gotit=1
+ for myuri in myuris:
+ myfile=os.path.basename(myuri)
+ try:
+ mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
+ except (OSError,IOError),e:
+ # file does not exist
+ writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
+ gotit=0
+ if not gotit:
+ print
+ print "!!!",mysettings["CATEGORY"]+"/"+mysettings["PF"],"has fetch restriction turned on."
+ print "!!! This probably means that this ebuild's files must be downloaded"
+ print "!!! manually. See the comments in the ebuild for more information."
+ print
+ spawn(EBUILD_SH_BINARY+" nofetch",mysettings)
+ return 0
+ return 1
+ locations=mymirrors[:]
+ filedict={}
+ primaryuri_indexes={}
+ for myuri in myuris:
+ myfile=os.path.basename(myuri)
+ if not filedict.has_key(myfile):
+ filedict[myfile]=[]
+ for y in range(0,len(locations)):
+ filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
+ if myuri[:9]=="mirror://":
+ eidx = myuri.find("/", 9)
+ if eidx != -1:
+ mirrorname = myuri[9:eidx]
+
+ # Try user-defined mirrors first
+ if custommirrors.has_key(mirrorname):
+ for cmirr in custommirrors[mirrorname]:
+ filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
+ # remove the mirrors we tried from the list of official mirrors
+ if cmirr.strip() in thirdpartymirrors[mirrorname]:
+ thirdpartymirrors[mirrorname].remove(cmirr)
+ # now try the official mirrors
+ if thirdpartymirrors.has_key(mirrorname):
+ try:
+ shuffle(thirdpartymirrors[mirrorname])
+ except SystemExit, e:
+ raise
+ except:
+ writemsg(red("!!! YOU HAVE A BROKEN PYTHON/GLIBC.\n"))
+ writemsg( "!!! You are most likely on a pentium4 box and have specified -march=pentium4\n")
+					writemsg( "!!! or -mfpmath=sse2. GCC was generating invalid sse2 instructions in versions\n")
+					writemsg( "!!! prior to 3.2.3. Please merge the latest gcc or rebuild python with either\n")
+ writemsg( "!!! -march=pentium3 or set -mno-sse2 in your cflags.\n\n\n")
+ time.sleep(10)
+
+ for locmirr in thirdpartymirrors[mirrorname]:
+ filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
+
+ if not filedict[myfile]:
+ writemsg("No known mirror by the name: %s\n" % (mirrorname))
+ else:
+ writemsg("Invalid mirror definition in SRC_URI:\n")
+ writemsg(" %s\n" % (myuri))
+ else:
+ if "primaryuri" in mysettings["RESTRICT"].split():
+ # Use the source site first.
+ if primaryuri_indexes.has_key(myfile):
+ primaryuri_indexes[myfile] += 1
+ else:
+ primaryuri_indexes[myfile] = 0
+ filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
+ else:
+ filedict[myfile].append(myuri)
+
+ missingSourceHost = False
+ for myfile in filedict.keys(): # Gives a list, not just the first one
+ if not filedict[myfile]:
+ writemsg("Warning: No mirrors available for file '%s'\n" % (myfile))
+ missingSourceHost = True
+ if missingSourceHost:
+ return 0
+ del missingSourceHost
+
+ can_fetch=True
+ if not os.access(mysettings["DISTDIR"]+"/",os.W_OK):
+ if not fetch_to_ro:
+ print "!!! No write access to %s" % mysettings["DISTDIR"]+"/"
+ can_fetch=False
+ else:
+ mystat=os.stat(mysettings["DISTDIR"]+"/")
+ if mystat.st_gid != portage_gid:
+ try:
+ os.chown(mysettings["DISTDIR"],-1,portage_gid)
+ except OSError, oe:
+ if oe.errno == 1:
+					print red("!!!")+" Unable to chgrp %s to portage, continuing\n" % mysettings["DISTDIR"]
+ else:
+ raise oe
+
+		# Is DISTDIR group-writable? We run as root here, so adjust the perms automatically if needed.
+ if not stat.S_IMODE(mystat.st_mode) & 020:
+ try:
+ os.chmod(mysettings["DISTDIR"],stat.S_IMODE(mystat.st_mode) | 020)
+ except OSError, oe:
+ if oe.errno == 1:
+					print red("!!!")+" Unable to make %s group writable. Non-root users will experience issues.\n" % mysettings["DISTDIR"]
+ else:
+ raise oe
+
+ if use_locks and locks_in_subdir:
+ if os.path.exists(mysettings["DISTDIR"]+"/"+locks_in_subdir):
+ if not os.access(mysettings["DISTDIR"]+"/"+locks_in_subdir,os.W_OK):
+				writemsg("!!! No write access to %s. Aborting.\n" % (mysettings["DISTDIR"]+"/"+locks_in_subdir))
+ return 0
+ else:
+ old_umask=os.umask(0002)
+ os.mkdir(mysettings["DISTDIR"]+"/"+locks_in_subdir,0775)
+ if os.stat(mysettings["DISTDIR"]+"/"+locks_in_subdir).st_gid != portage_gid:
+ try:
+ os.chown(mysettings["DISTDIR"]+"/"+locks_in_subdir,-1,portage_gid)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ os.umask(old_umask)
+
+
+ for myfile in filedict.keys():
+ fetched=0
+ file_lock = None
+ if listonly:
+ writemsg("\n")
+ else:
+ if use_locks and can_fetch:
+ if locks_in_subdir:
+ file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1)
+ else:
+ file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1)
+ try:
+ for loc in filedict[myfile]:
+ if listonly:
+ writemsg(loc+" ")
+ continue
+ # allow different fetchcommands per protocol
+ protocol = loc[0:loc.find("://")]
+ if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
+ fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
+ else:
+ fetchcommand=mysettings["FETCHCOMMAND"]
+ if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
+ resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
+ else:
+ resumecommand=mysettings["RESUMECOMMAND"]
+
+ fetchcommand=string.replace(fetchcommand,"${DISTDIR}",mysettings["DISTDIR"])
+ resumecommand=string.replace(resumecommand,"${DISTDIR}",mysettings["DISTDIR"])
+
+ try:
+ mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
+ if mydigests.has_key(myfile):
+ #if we have the digest file, we know the final size and can resume the download.
+ if mystat[stat.ST_SIZE]<mydigests[myfile]["size"]:
+ fetched=1
+ else:
+ #we already have it downloaded, skip.
+ #if our file is bigger than the recorded size, digestcheck should catch it.
+ if not fetchonly:
+ fetched=2
+ else:
+ # Check md5sum's at each fetch for fetchonly.
+ verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
+ if not verified_ok:
+ writemsg("!!! Previously fetched file: "+str(myfile)+"\n!!! Reason: "+reason+"\nRefetching...\n\n")
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ fetched=0
+ else:
+ for x_key in mydigests[myfile].keys():
+ writemsg(">>> Previously fetched file: "+str(myfile)+" "+x_key+" ;-)\n")
+ fetched=2
+ break #No need to keep looking for this file, we have it!
+ else:
+ #we don't have the digest file, but the file exists. Assume it is fully downloaded.
+ fetched=2
+ except (OSError,IOError),e:
+ writemsg("An exception was caught(1)...\nFailing the download: %s.\n" % (str(e)),1)
+ fetched=0
+
+ if not can_fetch:
+ if fetched != 2:
+ if fetched == 0:
+							writemsg("!!! File %s is not fetched, and we are unable to fetch it.\n" % myfile)
+						else:
+							writemsg("!!! File %s is not fully fetched, and we are unable to complete it.\n" % myfile)
+ return 0
+ else:
+ continue
+
+ # check if we can actually write to the directory/existing file.
+ if fetched!=2 and os.path.exists(mysettings["DISTDIR"]+"/"+myfile) != \
+ os.access(mysettings["DISTDIR"]+"/"+myfile, os.W_OK) and not fetch_to_ro:
+ writemsg(red("***")+" Lack write access to %s, failing fetch\n" % str(mysettings["DISTDIR"]+"/"+myfile))
+ fetched=0
+ break
+ elif fetched!=2:
+ #we either need to resume or start the download
+ #you can't use "continue" when you're inside a "try" block
+ if fetched==1:
+ #resume mode:
+ writemsg(">>> Resuming download...\n")
+ locfetch=resumecommand
+ else:
+ #normal mode:
+ locfetch=fetchcommand
+ writemsg(">>> Downloading "+str(loc)+"\n")
+ myfetch=string.replace(locfetch,"${URI}",loc)
+ myfetch=string.replace(myfetch,"${FILE}",myfile)
+ try:
+ if selinux_enabled:
+ con=selinux.getcontext()
+ con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_FETCH_T"])
+ selinux.setexec(con)
+ myret=spawn(myfetch,mysettings,free=1)
+ selinux.setexec(None)
+ else:
+ myret=spawn(myfetch,mysettings,free=1)
+ finally:
+ #if root, -always- set the perms.
+ if os.path.exists(mysettings["DISTDIR"]+"/"+myfile) and (fetched != 1 or os.getuid() == 0) \
+ and os.access(mysettings["DISTDIR"]+"/",os.W_OK):
+ if os.stat(mysettings["DISTDIR"]+"/"+myfile).st_gid != portage_gid:
+ try:
+ os.chown(mysettings["DISTDIR"]+"/"+myfile,-1,portage_gid)
+ except SystemExit, e:
+ raise
+ except:
+ portage_util.writemsg("chown failed on distfile: " + str(myfile))
+ os.chmod(mysettings["DISTDIR"]+"/"+myfile,0664)
+
+ if mydigests!=None and mydigests.has_key(myfile):
+ try:
+ mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
+							# no exception means the file exists; let digestcheck()
+							# report size or md5 errors appropriately
+ if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
+ # Fetch failed... Try the next one... Kill 404 files though.
+ if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
+ html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
+ try:
+ if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
+ try:
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ continue
+ if not fetchonly:
+ fetched=2
+ break
+ else:
+ # File is the correct size--check the MD5 sum for the fetched
+ # file NOW, for those users who don't have a stable/continuous
+ # net connection. This way we have a chance to try to download
+ # from another mirror...
+ verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
+ if not verified_ok:
+ writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n!!! Reason: "+reason+"\nRemoving corrupt distfile...\n")
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ fetched=0
+ else:
+ for x_key in mydigests[myfile].keys():
+ writemsg(">>> "+str(myfile)+" "+x_key+" ;-)\n")
+ fetched=2
+ break
+ except (OSError,IOError),e:
+ writemsg("An exception was caught(2)...\nFailing the download: %s.\n" % (str(e)),1)
+ fetched=0
+ else:
+ if not myret:
+ fetched=2
+ break
+ elif mydigests!=None:
+ writemsg("No digest file available and download failed.\n\n")
+ finally:
+ if use_locks and file_lock:
+ portage_locks.unlockfile(file_lock)
+
+ if listonly:
+ writemsg("\n")
+ if (fetched!=2) and not listonly:
+ writemsg("!!! Couldn't download "+str(myfile)+". Aborting.\n")
+ return 0
+ return 1
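+# Illustrative sketch (not from the original source): a typical fetch()
+# call with a hypothetical SRC_URI list; fetch() returns 1 on success.
+def _fetch_example(mysettings):
+	myuris = ["mirror://gnu/hello/hello-2.1.1.tar.gz"]
+	if not fetch(myuris, mysettings, fetchonly=1):
+		writemsg("!!! Could not fetch all distfiles.\n")
+		return 0
+	return 1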
+
+
+def digestCreate(myfiles,basedir,oldDigest={}):
+	"""Takes a list of files and the directory they are in, and returns a
+	nested dict of the form mydigests[filename][CHECKSUM_KEY] = hash.
+	Returns None on error."""
+ mydigests={}
+ for x in myfiles:
+ print "<<<",x
+ myfile=os.path.normpath(basedir+"///"+x)
+ if os.path.exists(myfile):
+ if not os.access(myfile, os.R_OK):
+ print "!!! Given file does not appear to be readable. Does it exist?"
+ print "!!! File:",myfile
+ return None
+ mydigests[x] = portage_checksum.perform_all(myfile)
+ mysize = os.stat(myfile)[stat.ST_SIZE]
+ else:
+ if x in oldDigest:
+ # DeepCopy because we might not have a unique reference.
+ mydigests[x] = copy.deepcopy(oldDigest[x])
+ mysize = copy.deepcopy(oldDigest[x]["size"])
+ else:
+ print "!!! We have a source URI, but no file..."
+ print "!!! File:",myfile
+ return None
+
+ if mydigests[x].has_key("size") and (mydigests[x]["size"] != mysize):
+ raise portage_exception.DigestException, "Size mismatch during checksums"
+ mydigests[x]["size"] = copy.deepcopy(mysize)
+ return mydigests
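+# Illustrative example (not from the original source) of the nested dict
+# digestCreate() returns; the file name, hash and size are hypothetical:
+#   digestCreate(["foo-1.0.tar.gz"], "/usr/portage/distfiles") ==>
+#     {"foo-1.0.tar.gz": {"MD5": "d41d8cd98f00b204e9800998ecf8427e",
+#                         "size": 12345}}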
+
+def digestCreateLines(filelist, mydict):
+ mylines = []
+ mydigests = copy.deepcopy(mydict)
+ for myarchive in filelist:
+		if len(mydigests[myarchive]) == 0:
+			raise portage_exception.DigestException, "No digest generated for '%(file)s'" % {"file":myarchive}
+		mysize = mydigests[myarchive]["size"]
+ for sumName in mydigests[myarchive].keys():
+ if sumName not in portage_checksum.get_valid_checksum_keys():
+ continue
+ mysum = mydigests[myarchive][sumName]
+
+ myline = sumName[:]
+ myline += " "+mysum
+ myline += " "+myarchive
+ myline += " "+str(mysize)
+ if sumName != "MD5":
+ # XXXXXXXXXXXXXXXX This cannot be used!
+				# Older portage makes very dumb assumptions about the formats.
+ # We need a lead-in period before we break everything.
+ continue
+ mylines.append(myline)
+ return mylines
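+# Illustrative example (not from the original source) of the digest line
+# format digestCreateLines() emits (hash and file name hypothetical):
+#   MD5 d41d8cd98f00b204e9800998ecf8427e foo-1.0.tar.gz 12345
+# Only MD5 lines are emitted for now, per the compatibility note above.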
+
+def digestgen(myarchives,mysettings,overwrite=1,manifestonly=0):
+ """generates digest file if missing. Assumes all files are available. If
+ overwrite=0, the digest will only be created if it doesn't already exist."""
+
+ # archive files
+ basedir=mysettings["DISTDIR"]+"/"
+ digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
+
+ # portage files -- p(ortagefiles)basedir
+ pbasedir=mysettings["O"]+"/"
+ manifestfn=pbasedir+"Manifest"
+
+ if not manifestonly:
+ if not os.path.isdir(mysettings["FILESDIR"]):
+ os.makedirs(mysettings["FILESDIR"])
+ mycvstree=cvstree.getentries(pbasedir, recursive=1)
+
+ if ("cvs" in features) and os.path.exists(pbasedir+"/CVS"):
+ if not cvstree.isadded(mycvstree,"files"):
+ if "autoaddcvs" in features:
+ print ">>> Auto-adding files/ dir to CVS..."
+ spawn("cd "+pbasedir+"; cvs add files",mysettings,free=1)
+ else:
+ print "--- Warning: files/ is not added to cvs."
+
+ if (not overwrite) and os.path.exists(digestfn):
+ return 1
+
+ print green(">>> Generating digest file...")
+
+	# Track the old digest so we can reuse its checksums without requiring
+	# all files to be downloaded again.
+ myolddigest = {}
+ if os.path.exists(digestfn):
+ myolddigest = digestParseFile(digestfn)
+
+ mydigests=digestCreate(myarchives, basedir, oldDigest=myolddigest)
+ if mydigests==None: # There was a problem, exit with an errorcode.
+ return 0
+
+ try:
+ outfile=open(digestfn, "w+")
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Filesystem error skipping generation. (Read-Only?)"
+ print "!!!",e
+ return 0
+ for x in digestCreateLines(myarchives, mydigests):
+ outfile.write(x+"\n")
+ outfile.close()
+ try:
+ os.chown(digestfn,os.getuid(),portage_gid)
+ os.chmod(digestfn,0664)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ print e
+
+ print green(">>> Generating manifest file...")
+ mypfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
+ mypfiles=cvstree.apply_cvsignore_filter(mypfiles)
+ for x in ["Manifest"]:
+ if x in mypfiles:
+ mypfiles.remove(x)
+
+ mydigests=digestCreate(mypfiles, pbasedir)
+ if mydigests==None: # There was a problem, exit with an errorcode.
+ return 0
+
+ try:
+ outfile=open(manifestfn, "w+")
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Filesystem error skipping generation. (Read-Only?)"
+ print "!!!",e
+ return 0
+ for x in digestCreateLines(mypfiles, mydigests):
+ outfile.write(x+"\n")
+ outfile.close()
+ try:
+ os.chown(manifestfn,os.getuid(),portage_gid)
+ os.chmod(manifestfn,0664)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ print e
+
+ if "cvs" in features and os.path.exists(pbasedir+"/CVS"):
+ mycvstree=cvstree.getentries(pbasedir, recursive=1)
+ myunaddedfiles=""
+ if not manifestonly and not cvstree.isadded(mycvstree,digestfn):
+ if digestfn[:len(pbasedir)]==pbasedir:
+ myunaddedfiles=digestfn[len(pbasedir):]+" "
+ else:
+ myunaddedfiles=digestfn+" "
+ if not cvstree.isadded(mycvstree,manifestfn[len(pbasedir):]):
+ if manifestfn[:len(pbasedir)]==pbasedir:
+ myunaddedfiles+=manifestfn[len(pbasedir):]+" "
+ else:
+ myunaddedfiles+=manifestfn
+ if myunaddedfiles:
+ if "autoaddcvs" in features:
+ print blue(">>> Auto-adding digest file(s) to CVS...")
+ spawn("cd "+pbasedir+"; cvs add "+myunaddedfiles,mysettings,free=1)
+ else:
+ print "--- Warning: digests are not yet added into CVS."
+ print darkgreen(">>> Computed message digests.")
+ print
+ return 1
+
+
+def digestParseFile(myfilename):
+	"""(filename) -- Parses a given file for entries matching:
+	CHECKSUM_KEY HASH_STRING_OF_HEX_CHARS FILE_NAME FILE_SIZE
+	Ignores lines whose first field is not a supported checksum key.
+	Returns a dict keyed by filename; each value maps checksum keys
+	to hashes and also carries a "size" entry."""
+
+ if not os.path.exists(myfilename):
+ return None
+ mylines = portage_util.grabfile(myfilename, compat_level=1)
+
+ mydigests={}
+ for x in mylines:
+ myline=string.split(x)
+ if len(myline) < 4:
+ #invalid line
+ continue
+ if myline[0] not in portage_checksum.get_valid_checksum_keys():
+ continue
+ mykey = myline.pop(0)
+ myhash = myline.pop(0)
+ mysize = long(myline.pop())
+ myfn = string.join(myline, " ")
+ if myfn not in mydigests:
+ mydigests[myfn] = {}
+ mydigests[myfn][mykey] = myhash
+ if "size" in mydigests[myfn]:
+ if mydigests[myfn]["size"] != mysize:
+ raise portage_exception.DigestException, "Conflicting sizes in digest: %(filename)s" % {"filename":myfilename}
+ else:
+ mydigests[myfn]["size"] = mysize
+ return mydigests
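+# Illustrative sketch (not from the original source): reading a digest
+# file back into the nested dict form; the path is hypothetical.
+def _digestParseFile_example():
+	mydigests = digestParseFile("/usr/portage/app-misc/foo/files/digest-foo-1.0")
+	if mydigests is None:
+		return
+	for myfn in mydigests.keys():
+		print myfn, mydigests[myfn].get("MD5"), mydigests[myfn]["size"]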
+
+# XXXX strict was added here to fix a missing name error.
+# XXXX It's used below, but we're not paying attention to how we get it?
+def digestCheckFiles(myfiles, mydigests, basedir, note="", strict=0):
+ """(fileslist, digestdict, basedir) -- Takes a list of files and a dict
+ of their digests and checks the digests against the indicated files in
+ the basedir given. Returns 1 only if all files exist and match the md5s.
+ """
+ for x in myfiles:
+ if not mydigests.has_key(x):
+ print
+ print red("!!! No message digest entry found for file \""+x+".\"")
+ print "!!! Most likely a temporary problem. Try 'emerge sync' again later."
+ print "!!! If you are certain of the authenticity of the file then you may type"
+ print "!!! the following to generate a new digest:"
+ print "!!! ebuild /usr/portage/category/package/package-version.ebuild digest"
+ return 0
+ myfile=os.path.normpath(basedir+"/"+x)
+ if not os.path.exists(myfile):
+ if strict:
+ print "!!! File does not exist:",myfile
+ return 0
+ continue
+
+ ok,reason = portage_checksum.verify_all(myfile,mydigests[x])
+ if not ok:
+ print
+ print red("!!! Digest verification Failed:")
+ print red("!!!")+" "+str(myfile)
+ print red("!!! Reason: ")+reason
+ print
+ return 0
+ else:
+ print ">>> md5 "+note+" ;-)",x
+ return 1
+
+
+def digestcheck(myfiles, mysettings, strict=0):
+ """Checks md5sums. Assumes all files have been downloaded."""
+ # archive files
+ basedir=mysettings["DISTDIR"]+"/"
+ digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
+
+ # portage files -- p(ortagefiles)basedir
+ pbasedir=mysettings["O"]+"/"
+ manifestfn=pbasedir+"Manifest"
+
+ if not (os.path.exists(digestfn) and os.path.exists(manifestfn)):
+ if "digest" in features:
+ print ">>> No package digest/Manifest file found."
+ print ">>> \"digest\" mode enabled; auto-generating new digest..."
+ return digestgen(myfiles,mysettings)
+ else:
+ if not os.path.exists(manifestfn):
+ if strict:
+ print red("!!! No package manifest found:"),manifestfn
+ return 0
+ else:
+ print "--- No package manifest found:",manifestfn
+ if not os.path.exists(digestfn):
+ print "!!! No package digest file found:",digestfn
+ print "!!! Type \"ebuild foo.ebuild digest\" to generate it."
+ return 0
+
+ mydigests=digestParseFile(digestfn)
+ if mydigests==None:
+ print "!!! Failed to parse digest file:",digestfn
+ return 0
+ mymdigests=digestParseFile(manifestfn)
+ if "strict" not in features:
+ # XXX: Remove this when manifests become mainstream.
+ pass
+ elif mymdigests==None:
+ print "!!! Failed to parse manifest file:",manifestfn
+ if strict:
+ return 0
+ else:
+ # Check the portage-related files here.
+ mymfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
+ manifest_files = mymdigests.keys()
+ for x in ["Manifest", "ChangeLog", "metadata.xml"]:
+ while x in mymfiles:
+ mymfiles.remove(x)
+ while x in manifest_files:
+ manifest_files.remove(x)
+ for x in range(len(mymfiles)-1,-1,-1):
+ if mymfiles[x] in manifest_files:
+ manifest_files.remove(mymfiles[x])
+ elif len(cvstree.apply_cvsignore_filter([mymfiles[x]]))==0:
+				# we filter here, rather than above; manifest might have files flagged by the filter.
+ # if something is returned, then it's flagged as a bad file
+ # manifest doesn't know about it, so we kill it here.
+ del mymfiles[x]
+ else:
+ print red("!!! Security Violation: A file exists that is not in the manifest.")
+ print "!!! File:",mymfiles[x]
+ if strict:
+ return 0
+ if manifest_files and strict:
+ print red("!!! Files listed in the manifest do not exist!")
+ for x in manifest_files:
+ print x
+ return 0
+
+ if not digestCheckFiles(mymfiles, mymdigests, pbasedir, note="files ", strict=strict):
+ if strict:
+ print ">>> Please ensure you have sync'd properly. Please try '"+bold("emerge sync")+"' and"
+ print ">>> optionally examine the file(s) for corruption. "+bold("A sync will fix most cases.")
+ print
+ return 0
+ else:
+ print "--- Manifest check failed. 'strict' not enabled; ignoring."
+ print
+
+ # Just return the status, as it's the last check.
+ return digestCheckFiles(myfiles, mydigests, basedir, note="src_uri", strict=strict)
+
+# parse actionmap to spawn ebuild with the appropriate args
+def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
+ if alwaysdep or ("noauto" not in features):
+ # process dependency first
+ if "dep" in actionmap[mydo].keys():
+ retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
+ if retval:
+ return retval
+ # spawn ebuild.sh
+ mycommand = EBUILD_SH_BINARY + " "
+ if selinux_enabled and ("sesandbox" in features) and (mydo in ["unpack","compile","test","install"]):
+ con=selinux.getcontext()
+ con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_SANDBOX_T"])
+ selinux.setexec(con)
+ retval=spawn(mycommand + mydo,mysettings,debug=debug,
+ free=actionmap[mydo]["args"][0],
+ droppriv=actionmap[mydo]["args"][1],logfile=logfile)
+ selinux.setexec(None)
+ else:
+ retval=spawn(mycommand + mydo,mysettings, debug=debug,
+ free=actionmap[mydo]["args"][0],
+ droppriv=actionmap[mydo]["args"][1],logfile=logfile)
+ return retval
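+# Illustrative sketch (not from the original source) of the dep chaining
+# above: with the hypothetical actionmap below, requesting "compile" runs
+# setup, then unpack, then compile (unless "noauto" is in FEATURES and
+# alwaysdep is unset). The "args" tuples are (free, droppriv) for spawn().
+#   actionmap = {
+#     "setup":   {                "args":(1,0)},
+#     "unpack":  {"dep":"setup",  "args":(0,1)},
+#     "compile": {"dep":"unpack", "args":(0,1)},
+#   }
+#   spawnebuild("compile", actionmap, mysettings, debug=0)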
+
+def doebuild(myebuild,mydo,myroot,mysettings,debug=0,listonly=0,fetchonly=0,cleanup=0,dbkey=None,use_cache=1,fetchall=0,tree="porttree"):
+ global db
+
+ ebuild_path = os.path.abspath(myebuild)
+ pkg_dir = os.path.dirname(ebuild_path)
+
+ if mysettings.configdict["pkg"].has_key("CATEGORY"):
+ cat = mysettings.configdict["pkg"]["CATEGORY"]
+ else:
+ cat = os.path.basename(os.path.normpath(pkg_dir+"/.."))
+ mypv = os.path.basename(ebuild_path)[:-7]
+ mycpv = cat+"/"+mypv
+
+ mysplit=pkgsplit(mypv,silent=0)
+ if mysplit==None:
+ writemsg("!!! Error: PF is null '%s'; exiting.\n" % mypv)
+ return 1
+
+ if mydo != "depend":
+		# XXX: We're doing a little hack here to curtail the gvisible locking
+ # XXX: that creates a deadlock... Really need to isolate that.
+ mysettings.reset(use_cache=use_cache)
+ mysettings.setcpv(mycpv,use_cache=use_cache)
+
+ validcommands = ["help","clean","prerm","postrm","preinst","postinst",
+ "config","setup","depend","fetch","digest",
+ "unpack","compile","test","install","rpm","qmerge","merge",
+ "package","unmerge", "manifest"]
+
+ if mydo not in validcommands:
+ validcommands.sort()
+ writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo)
+ for vcount in range(len(validcommands)):
+ if vcount%6 == 0:
+ writemsg("\n!!! ")
+ writemsg(string.ljust(validcommands[vcount], 11))
+ writemsg("\n")
+ return 1
+
+ if not os.path.exists(myebuild):
+ writemsg("!!! doebuild: "+str(myebuild)+" not found for "+str(mydo)+"\n")
+ return 1
+
+ if debug: # Otherwise it overrides emerge's settings.
+ # We have no other way to set debug... debug can't be passed in
+ # due to how it's coded... Don't overwrite this so we can use it.
+ mysettings["PORTAGE_DEBUG"]=str(debug)
+
+ mysettings["ROOT"] = myroot
+ mysettings["STARTDIR"] = getcwd()
+
+ mysettings["EBUILD"] = ebuild_path
+ mysettings["O"] = pkg_dir
+ mysettings["CATEGORY"] = cat
+ mysettings["FILESDIR"] = pkg_dir+"/files"
+ mysettings["PF"] = mypv
+
+ mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
+ mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
+
+ mysettings["PROFILE_PATHS"] = string.join(mysettings.profiles,"\n")+"\n"+CUSTOM_PROFILE_PATH
+ mysettings["P"] = mysplit[0]+"-"+mysplit[1]
+ mysettings["PN"] = mysplit[0]
+ mysettings["PV"] = mysplit[1]
+ mysettings["PR"] = mysplit[2]
+
+ if mydo != "depend":
+ try:
+ mysettings["INHERITED"], mysettings["RESTRICT"] = db[root][tree].dbapi.aux_get( \
+ mycpv,["INHERITED","RESTRICT"])
+ mysettings["PORTAGE_RESTRICT"]=string.join(flatten(portage_dep.use_reduce(portage_dep.paren_reduce( \
+ mysettings["RESTRICT"]), uselist=mysettings["USE"].split())),' ')
+ except SystemExit, e:
+ raise
+ except:
+ pass
+
+ if mysplit[2] == "r0":
+ mysettings["PVR"]=mysplit[1]
+ else:
+ mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
+
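+	# Illustrative example (not from the original source): with profile
+	# defaults USE="x86 kde" and USE="-* gnome" in the environment, the
+	# incremental stacking above clears all accumulated flags at "-*",
+	# re-adds "gnome", and the ARCH prepend then yields USE="x86 gnome".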
+ mysettings["SLOT"]=""
+
+ if mysettings.has_key("PATH"):
+ mysplit=string.split(mysettings["PATH"],":")
+ else:
+ mysplit=[]
+ if PORTAGE_BIN_PATH not in mysplit:
+ mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
+
+
+ mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
+ mysettings["HOME"] = mysettings["BUILD_PREFIX"]+"/homedir"
+ mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/portage-pkg"
+ mysettings["BUILDDIR"] = mysettings["BUILD_PREFIX"]+"/"+mysettings["PF"]
+
+ mysettings["PORTAGE_BASHRC"] = EBUILD_SH_ENV_FILE
+
+ #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
+ if (mydo!="depend") or not mysettings.has_key("KV"):
+ mykv,err1=ExtractKernelVersion(root+"usr/src/linux")
+ if mykv:
+ # Regular source tree
+ mysettings["KV"]=mykv
+ else:
+ mysettings["KV"]=""
+
+ if (mydo!="depend") or not mysettings.has_key("KVERS"):
+ myso=os.uname()[2]
+		mysettings["KVERS"]=myso
+
+
+ # get possible slot information from the deps file
+ if mydo=="depend":
+ if mysettings.has_key("PORTAGE_DEBUG") and mysettings["PORTAGE_DEBUG"]=="1":
+ # XXX: This needs to use a FD for saving the output into a file.
+ # XXX: Set this up through spawn
+ pass
+ writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
+ if dbkey:
+ mysettings["dbkey"] = dbkey
+ else:
+ mysettings["dbkey"] = mysettings.depcachedir+"/aux_db_key_temp"
+
+ retval = spawn(EBUILD_SH_BINARY+" depend",mysettings)
+ return retval
+
+ logfile=None
+ # Build directory creation isn't required for any of these.
+ if mydo not in ["fetch","digest","manifest"]:
+
+ if not os.path.exists(mysettings["BUILD_PREFIX"]):
+ os.makedirs(mysettings["BUILD_PREFIX"])
+ os.chown(mysettings["BUILD_PREFIX"],portage_uid,portage_gid)
+ os.chmod(mysettings["BUILD_PREFIX"],00775)
+
+ # Should be ok again to set $T, as sandbox does not depend on it
+ mysettings["T"]=mysettings["BUILDDIR"]+"/temp"
+ if cleanup or mydo=="clean":
+ if os.path.exists(mysettings["T"]):
+ shutil.rmtree(mysettings["T"])
+ if not os.path.exists(mysettings["T"]):
+ os.makedirs(mysettings["T"])
+ os.chown(mysettings["T"],portage_uid,portage_gid)
+ os.chmod(mysettings["T"],02770)
+
+ try: # XXX: negative RESTRICT
+ if not (("nouserpriv" in string.split(mysettings["PORTAGE_RESTRICT"])) or \
+ ("userpriv" in string.split(mysettings["PORTAGE_RESTRICT"]))):
+ if ("userpriv" in features) and (portage_uid and portage_gid):
+ if (secpass==2):
+ if os.path.exists(mysettings["HOME"]):
+ # XXX: Potentially bad, but held down by HOME replacement above.
+ spawn("rm -Rf "+mysettings["HOME"],mysettings, free=1)
+ if not os.path.exists(mysettings["HOME"]):
+ os.makedirs(mysettings["HOME"])
+ elif ("userpriv" in features):
+ print "!!! Disabling userpriv from features... Portage UID/GID not valid."
+ del features[features.index("userpriv")]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Couldn't empty HOME:",mysettings["HOME"]
+ print "!!!",e
+
+ try:
+ # no reason to check for depend since depend returns above.
+ if not os.path.exists(mysettings["BUILD_PREFIX"]):
+ os.makedirs(mysettings["BUILD_PREFIX"])
+ os.chown(mysettings["BUILD_PREFIX"],portage_uid,portage_gid)
+ if not os.path.exists(mysettings["BUILDDIR"]):
+ os.makedirs(mysettings["BUILDDIR"])
+ os.chown(mysettings["BUILDDIR"],portage_uid,portage_gid)
+ except OSError, e:
+ print "!!! File system problem. (ReadOnly? Out of space?)"
+ print "!!! Perhaps: rm -Rf",mysettings["BUILD_PREFIX"]
+ print "!!!",str(e)
+ return 1
+
+ try:
+ if not os.path.exists(mysettings["HOME"]):
+ os.makedirs(mysettings["HOME"])
+ os.chown(mysettings["HOME"],portage_uid,portage_gid)
+ os.chmod(mysettings["HOME"],02770)
+ except OSError, e:
+ print "!!! File system problem. (ReadOnly? Out of space?)"
+ print "!!! Failed to create fake home directory in BUILDDIR"
+ print "!!!",str(e)
+ return 1
+
+ try:
+ if ("ccache" in features):
+ if (not mysettings.has_key("CCACHE_DIR")) or (mysettings["CCACHE_DIR"]==""):
+ mysettings["CCACHE_DIR"]=mysettings["PORTAGE_TMPDIR"]+"/ccache"
+ if not os.path.exists(mysettings["CCACHE_DIR"]):
+ os.makedirs(mysettings["CCACHE_DIR"])
+ mystat = os.stat(mysettings["CCACHE_DIR"])
+ if ("userpriv" in features):
+				if mystat[stat.ST_UID] != portage_uid or ((mystat[stat.ST_MODE]&02070)!=02070):
+ spawn("chgrp -R "+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("chown "+str(portage_uid)+":"+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("chmod -R u+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("chmod -R g+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ else:
+ if mystat[stat.ST_UID] != 0 or ((mystat[stat.ST_MODE]&02070)!=02070):
+ spawn("chgrp -R "+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("chown 0:"+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("chmod -R u+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ spawn("chmod -R g+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
+ except OSError, e:
+ print "!!! File system problem. (ReadOnly? Out of space?)"
+ print "!!! Perhaps: rm -Rf",mysettings["BUILD_PREFIX"]
+ print "!!!",str(e)
+ return 1
+
+ #try:
+ # mystat=os.stat(mysettings["CCACHE_DIR"])
+ # if (mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&02070)!=02070):
+ # print "*** Adjusting ccache permissions for portage user..."
+ # os.chown(mysettings["CCACHE_DIR"],portage_uid,portage_gid)
+ # os.chmod(mysettings["CCACHE_DIR"],02770)
+ # spawn("chown -R "+str(portage_uid)+":"+str(portage_gid)+" "+mysettings["CCACHE_DIR"],mysettings, free=1)
+ # spawn("chmod -R g+rw "+mysettings["CCACHE_DIR"],mysettings, free=1)
+ #except SystemExit, e:
+ # raise
+ #except:
+ # pass
+
+ if "distcc" in features:
+ try:
+ if (not mysettings.has_key("DISTCC_DIR")) or (mysettings["DISTCC_DIR"]==""):
+ mysettings["DISTCC_DIR"]=mysettings["PORTAGE_TMPDIR"]+"/portage/.distcc"
+ if not os.path.exists(mysettings["DISTCC_DIR"]):
+ os.makedirs(mysettings["DISTCC_DIR"])
+ os.chown(mysettings["DISTCC_DIR"],portage_uid,portage_gid)
+ os.chmod(mysettings["DISTCC_DIR"],02775)
+ for x in ("/lock", "/state"):
+ if not os.path.exists(mysettings["DISTCC_DIR"]+x):
+ os.mkdir(mysettings["DISTCC_DIR"]+x)
+ os.chown(mysettings["DISTCC_DIR"]+x,portage_uid,portage_gid)
+ os.chmod(mysettings["DISTCC_DIR"]+x,02775)
+ except OSError, e:
+ writemsg("\n!!! File system problem when setting DISTCC_DIR directory permissions.\n")
+			writemsg( "!!! DISTCC_DIR="+str(mysettings["DISTCC_DIR"])+"\n")
+ writemsg( "!!! "+str(e)+"\n\n")
+ time.sleep(5)
+ features.remove("distcc")
+ mysettings["DISTCC_DIR"]=""
+
+ mysettings["WORKDIR"]=mysettings["BUILDDIR"]+"/work"
+ mysettings["D"]=mysettings["BUILDDIR"]+"/image/"
+
+ if mysettings.has_key("PORT_LOGDIR"):
+ if os.access(mysettings["PORT_LOGDIR"]+"/",os.W_OK):
+ try:
+ os.chown(mysettings["BUILD_PREFIX"],portage_uid,portage_gid)
+ os.chmod(mysettings["PORT_LOGDIR"],02770)
+ if not mysettings.has_key("LOG_PF") or (mysettings["LOG_PF"] != mysettings["PF"]):
+ mysettings["LOG_PF"]=mysettings["PF"]
+ mysettings["LOG_COUNTER"]=str(db[myroot]["vartree"].dbapi.get_counter_tick_core("/"))
+ logfile="%s/%s-%s.log" % (mysettings["PORT_LOGDIR"],mysettings["LOG_COUNTER"],mysettings["LOG_PF"])
+ except ValueError, e:
+ mysettings["PORT_LOGDIR"]=""
+ print "!!! Unable to chown/chmod PORT_LOGDIR. Disabling logging."
+ print "!!!",e
+ else:
+ print "!!! Cannot create log... No write access / Does not exist"
+ print "!!! PORT_LOGDIR:",mysettings["PORT_LOGDIR"]
+ mysettings["PORT_LOGDIR"]=""
+
+ if mydo=="unmerge":
+ return unmerge(mysettings["CATEGORY"],mysettings["PF"],myroot,mysettings)
+
+ # if any of these are being called, handle them -- running them out of the sandbox -- and stop now.
+ if mydo=="clean":
+ logfile=None
+ if mydo in ["help","clean","setup"]:
+ return spawn(EBUILD_SH_BINARY+" "+mydo,mysettings,debug=debug,free=1,logfile=logfile)
+ elif mydo in ["prerm","postrm","preinst","postinst","config"]:
+ mysettings.load_infodir(pkg_dir)
+ return spawn(EBUILD_SH_BINARY+" "+mydo,mysettings,debug=debug,free=1,logfile=logfile)
+
+ try:
+ mysettings["SLOT"],mysettings["RESTRICT"] = db["/"]["porttree"].dbapi.aux_get(mycpv,["SLOT","RESTRICT"])
+ except (IOError,KeyError):
+ print red("doebuild():")+" aux_get() error reading "+mycpv+"; aborting."
+ sys.exit(1)
+
+ newuris, alist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=mysettings)
+ alluris, aalist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=mysettings,all=1)
+ mysettings["A"]=string.join(alist," ")
+ mysettings["AA"]=string.join(aalist," ")
+ if ("mirror" in features) or fetchall:
+ fetchme=alluris[:]
+ checkme=aalist[:]
+ elif mydo=="digest":
+ fetchme=alluris[:]
+ checkme=aalist[:]
+ digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
+ if os.path.exists(digestfn):
+ mydigests=digestParseFile(digestfn)
+ if mydigests:
+ for x in mydigests:
+ while x in checkme:
+ i = checkme.index(x)
+ del fetchme[i]
+ del checkme[i]
+ else:
+ fetchme=newuris[:]
+ checkme=alist[:]
+
+ try:
+ if not os.path.exists(mysettings["DISTDIR"]):
+ os.makedirs(mysettings["DISTDIR"])
+ if not os.path.exists(mysettings["DISTDIR"]+"/cvs-src"):
+ os.makedirs(mysettings["DISTDIR"]+"/cvs-src")
+ except OSError, e:
+ print "!!! File system problem. (Bad Symlink?)"
+ print "!!! Fetching may fail:",str(e)
+
+ try:
+ mystat=os.stat(mysettings["DISTDIR"]+"/cvs-src")
+ if ((mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&02770)!=02770)) and not listonly:
+ print "*** Adjusting cvs-src permissions for portage user..."
+ os.chown(mysettings["DISTDIR"]+"/cvs-src",0,portage_gid)
+ os.chmod(mysettings["DISTDIR"]+"/cvs-src",02770)
+			spawn("chgrp -R "+str(portage_gid)+" "+mysettings["DISTDIR"]+"/cvs-src",mysettings,free=1)
+			spawn("chmod -R g+rw "+mysettings["DISTDIR"]+"/cvs-src",mysettings,free=1)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+
+ if not fetch(fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
+ return 1
+
+ if mydo=="fetch" and listonly:
+ return 0
+
+ if "digest" in features:
+ #generate digest if it doesn't exist.
+ if mydo=="digest":
+ return (not digestgen(aalist,mysettings,overwrite=1))
+ else:
+ digestgen(aalist,mysettings,overwrite=0)
+ elif mydo=="digest":
+ #since we are calling "digest" directly, recreate the digest even if it already exists
+ return (not digestgen(aalist,mysettings,overwrite=1))
+ if mydo=="manifest":
+ return (not digestgen(aalist,mysettings,overwrite=1,manifestonly=1))
+
+ if not digestcheck(checkme, mysettings, ("strict" in features)):
+ return 1
+
+ if mydo=="fetch":
+ return 0
+
+ #initial dep checks complete; time to process main commands
+
+ nosandbox=(("userpriv" in features) and ("usersandbox" not in features))
+ actionmap={
+ "depend": { "args":(0,1)}, # sandbox / portage
+ "setup": { "args":(1,0)}, # without / root
+ "unpack": {"dep":"setup", "args":(0,1)}, # sandbox / portage
+ "compile": {"dep":"unpack", "args":(nosandbox,1)}, # optional / portage
+ "test": {"dep":"compile", "args":(nosandbox,1)}, # optional / portage
+ "install": {"dep":"test", "args":(0,0)}, # sandbox / root
+ "rpm": {"dep":"install", "args":(0,0)}, # sandbox / root
+ "package": {"dep":"install", "args":(0,0)}, # sandbox / root
+ }
+
+ if mydo in actionmap.keys():
+ if mydo=="package":
+ for x in ["","/"+mysettings["CATEGORY"],"/All"]:
+ if not os.path.exists(mysettings["PKGDIR"]+x):
+ os.makedirs(mysettings["PKGDIR"]+x)
+ # REBUILD CODE FOR TBZ2 --- XXXX
+ return spawnebuild(mydo,actionmap,mysettings,debug,logfile=logfile)
+ elif mydo=="qmerge":
+ #check to ensure install was run. this *only* pops up when users forget it and are using ebuild
+ if not os.path.exists(mysettings["BUILDDIR"]+"/.installed"):
+			print "!!! mydo=qmerge, but the install phase hasn't been run"
+ sys.exit(1)
+ #qmerge is specifically not supposed to do a runtime dep check
+ return merge(mysettings["CATEGORY"],mysettings["PF"],mysettings["D"],mysettings["BUILDDIR"]+"/build-info",myroot,mysettings)
+ elif mydo=="merge":
+ retval=spawnebuild("install",actionmap,mysettings,debug,alwaysdep=1,logfile=logfile)
+ if retval:
+ return retval
+ return merge(mysettings["CATEGORY"],mysettings["PF"],mysettings["D"],mysettings["BUILDDIR"]+"/build-info",myroot,mysettings,myebuild=mysettings["EBUILD"])
+ else:
+ print "!!! Unknown mydo:",mydo
+ sys.exit(1)
+
+expandcache={}
+
+def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
+	"""moves a file from src to dest, preserving all permissions and attributes; mtime will
+	be preserved even when moving across filesystems. Returns the destination mtime on
+	success and None on failure. Move is atomic."""
+ #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
+ global lchown
+
+ try:
+ if not sstat:
+ sstat=os.lstat(src)
+ if bsd_chflags:
+ sflags=bsd_chflags.lgetflags(src)
+ if sflags < 0:
+ # Problem getting flags...
+				writemsg("!!! Couldn't get flags for "+src+"\n")
+ return None
+
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Stating source file failed... movefile()"
+ print "!!!",e
+ return None
+
+ destexists=1
+ try:
+ dstat=os.lstat(dest)
+ except SystemExit, e:
+ raise
+ except:
+ dstat=os.lstat(os.path.dirname(dest))
+ destexists=0
+
+ if bsd_chflags:
+ # Check that we can actually unset schg etc flags...
+ # Clear the flags on source and destination; we'll reinstate them after merging
+ if(destexists):
+ if bsd_chflags.lchflags(dest, 0) < 0:
+				writemsg("!!! Couldn't clear flags on file being merged: "+dest+"\n")
+ # We might have an immutable flag on the parent dir; save and clear.
+ pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
+ bsd_chflags.lchflags(os.path.dirname(dest), 0)
+
+ # Don't bother checking the return value here; if it fails then the next line will catch it.
+ bsd_chflags.lchflags(src, 0)
+
+ if bsd_chflags.lhasproblems(src)>0 or (destexists and bsd_chflags.lhasproblems(dest)>0) or bsd_chflags.lhasproblems(os.path.dirname(dest))>0:
+ # This is bad: we can't merge the file with these flags set.
+ writemsg("!!! Can't merge file "+dest+" because of flags set\n")
+ return None
+
+ if destexists:
+ if stat.S_ISLNK(dstat[stat.ST_MODE]):
+ try:
+ os.unlink(dest)
+ destexists=0
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ try:
+ target=os.readlink(src)
+ if mysettings and mysettings["D"]:
+ if target.find(mysettings["D"])==0:
+ target=target[len(mysettings["D"]):]
+ if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
+ os.unlink(dest)
+ if selinux_enabled:
+ sid = selinux.get_lsid(src)
+ selinux.secure_symlink(target,dest,sid)
+ else:
+ os.symlink(target,dest)
+ lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ if bsd_chflags:
+ # Restore the flags we saved before moving
+ if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
+					writemsg("!!! Couldn't restore flags ("+str(sflags)+") on " + dest+":\n")
+ return None
+ return os.lstat(dest)[stat.ST_MTIME]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! failed to properly create symlink:"
+ print "!!!",dest,"->",target
+ print "!!!",e
+ return None
+
+ renamefailed=1
+ if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
+ try:
+ if selinux_enabled:
+ ret=selinux.secure_rename(src,dest)
+ else:
+ ret=os.rename(src,dest)
+ renamefailed=0
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ import errno
+ if e[0]!=errno.EXDEV:
+ # Some random error.
+ print "!!! Failed to move",src,"to",dest
+ print "!!!",e
+ return None
+	# EXDEV: cross-device link ('bind' mounted or an actual cross-device move)
+ if renamefailed:
+ didcopy=0
+ if stat.S_ISREG(sstat[stat.ST_MODE]):
+ try: # For safety copy then move it over.
+ if selinux_enabled:
+ selinux.secure_copy(src,dest+"#new")
+ selinux.secure_rename(dest+"#new",dest)
+ else:
+ shutil.copyfile(src,dest+"#new")
+ os.rename(dest+"#new",dest)
+ didcopy=1
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print '!!! copy',src,'->',dest,'failed.'
+ print "!!!",e
+ return None
+ else:
+ #we don't yet handle special, so we need to fall back to /bin/mv
+ if selinux_enabled:
+ a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
+ else:
+ a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
+ if a[0]!=0:
+ print "!!! Failed to move special file:"
+ print "!!! '"+src+"' to '"+dest+"'"
+ print "!!!",a
+ return None # failure
+ try:
+ if didcopy:
+ lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
+ os.unlink(src)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Failed to chown/chmod/unlink in movefile()"
+ print "!!!",dest
+ print "!!!",e
+ return None
+
+ if newmtime:
+ os.utime(dest,(newmtime,newmtime))
+ else:
+ os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
+ newmtime=sstat[stat.ST_MTIME]
+
+ if bsd_chflags:
+ # Restore the flags we saved before moving
+ if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
+ writemsg("!!! Couldn't restore flags ("+str(sflags)+") on " + dest+":\n")
+ return None
+
+ return newmtime
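+# Illustrative sketch (not from the original source): a typical movefile()
+# call while merging an image; the paths are hypothetical.
+def _movefile_example():
+	newmtime = movefile("/var/tmp/portage/foo-1.0/image/usr/bin/foo",
+		"/usr/bin/foo")
+	if newmtime is None:
+		print "!!! movefile failed"
+	return newmtime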
+
+def merge(mycat,mypkg,pkgloc,infloc,myroot,mysettings,myebuild=None):
+ mylink=dblink(mycat,mypkg,myroot,mysettings)
+ return mylink.merge(pkgloc,infloc,myroot,myebuild)
+
+def unmerge(cat,pkg,myroot,mysettings,mytrimworld=1):
+ mylink=dblink(cat,pkg,myroot,mysettings)
+ if mylink.exists():
+ mylink.unmerge(trimworld=mytrimworld,cleanup=1)
+ mylink.delete()
+
+def relparse(myver):
+	"converts the last version part into four components"
+ number=0
+ suffix=0
+ endtype=0
+ endnumber=0
+
+ mynewver=string.split(myver,"_")
+ myver=mynewver[0]
+
+ #normal number or number with letter at end
+ divider=len(myver)-1
+ if myver[divider:] not in "1234567890":
+ #letter at end
+ suffix=ord(myver[divider:])
+ number=string.atof(myver[0:divider])
+ else:
+ number=string.atof(myver)
+
+ if len(mynewver)==2:
+ #an endversion
+ for x in endversion_keys:
+ elen=len(x)
+ if mynewver[1][:elen] == x:
+ match=1
+ endtype=endversion[x]
+ try:
+ endnumber=string.atof(mynewver[1][elen:])
+ except SystemExit, e:
+ raise
+ except:
+ endnumber=0
+ break
+ return [number,suffix,endtype,endnumber]
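+# Illustrative examples (not from the original source) of relparse()'s
+# [number, suffix, endtype, endnumber] output:
+#   relparse("6")        ==> [6.0, 0,        0,                   0]
+#   relparse("6b")       ==> [6.0, ord("b"), 0,                   0]
+#   relparse("6_alpha2") ==> [6.0, 0,        endversion["alpha"], 2.0]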
+
+#returns 1 if valid version string, else 0
+# valid string in format: <v1>.<v2>...<vx>[a-z,_{endversion}[vy]]
+# ververify doesn't do package rev.
+
+vercache={}
+def ververify(myorigval,silent=1):
+ try:
+ return vercache[myorigval]
+ except KeyError:
+ pass
+ if len(myorigval)==0:
+ if not silent:
+ print "!!! Name error: package contains empty \"-\" part."
+ return 0
+ myval=string.split(myorigval,'.')
+ if len(myval)==0:
+ if not silent:
+ print "!!! Name error: empty version string."
+ vercache[myorigval]=0
+ return 0
+	#all but the last component must be numeric
+ for x in myval[:-1]:
+ if not len(x):
+ if not silent:
+ print "!!! Name error in",myorigval+": two decimal points in a row"
+ vercache[myorigval]=0
+ return 0
+ try:
+ foo=int(x)
+ except SystemExit, e:
+ raise
+ except:
+ if not silent:
+ print "!!! Name error in",myorigval+": \""+x+"\" is not a valid version component."
+ vercache[myorigval]=0
+ return 0
+ if not len(myval[-1]):
+ if not silent:
+ print "!!! Name error in",myorigval+": two decimal points in a row"
+ vercache[myorigval]=0
+ return 0
+ try:
+ foo=int(myval[-1])
+ vercache[myorigval]=1
+ return 1
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ #ok, our last component is not a plain number or blank, let's continue
+ if myval[-1][-1] in string.lowercase:
+ try:
+ foo=int(myval[-1][:-1])
+ vercache[myorigval]=1
+ return 1
+ # 1a, 2.0b, etc.
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ #ok, maybe we have a 1_alpha or 1_beta2; let's see
+ #ep="endpart"
+ ep=string.split(myval[-1],"_")
+ if len(ep)!=2:
+ if not silent:
+ print "!!! Name error in",myorigval
+ vercache[myorigval]=0
+ return 0
+ try:
+ foo=int(ep[0][-1])
+ chk=ep[0]
+ except SystemExit, e:
+ raise
+ except:
+		# it's ok if the last char is not numeric, e.g. foo-1.0.0a_pre1
+ chk=ep[0][:-1]
+
+ try:
+ foo=int(chk)
+ except SystemExit, e:
+ raise
+ except:
+ #this needs to be numeric or numeric+single letter,
+ #i.e. the "1" in "1_alpha" or "1a_alpha"
+ if not silent:
+ print "!!! Name error in",myorigval+": characters before _ must be numeric or numeric+single letter"
+ vercache[myorigval]=0
+ return 0
+ for mye in endversion_keys:
+ if ep[1][0:len(mye)]==mye:
+ if len(mye)==len(ep[1]):
+ #no trailing numeric; ok
+ vercache[myorigval]=1
+ return 1
+ else:
+ try:
+ foo=int(ep[1][len(mye):])
+ vercache[myorigval]=1
+ return 1
+ except SystemExit, e:
+ raise
+ except:
+ #if no endversions work, *then* we return 0
+ pass
+ if not silent:
+ print "!!! Name error in",myorigval
+ vercache[myorigval]=0
+ return 0
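+# Illustrative examples (not from the original source) of ververify():
+#   ververify("1.0")        ==> 1
+#   ververify("1.0a")       ==> 1  (a single trailing letter is allowed)
+#   ververify("1.0_alpha2") ==> 1
+#   ververify("1..0")       ==> 0  (two decimal points in a row)
+#   ververify("1.0-r1")     ==> 0  (revisions are handled by pkgsplit, not here)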
+
+def isvalidatom(atom):
+ mycpv_cps = catpkgsplit(dep_getcpv(atom))
+ operator = get_operator(atom)
+ if operator:
+ if mycpv_cps and mycpv_cps[0] != "null":
+ # >=cat/pkg-1.0
+ return 1
+ else:
+ # >=cat/pkg or >=pkg-1.0 (no category)
+ return 0
+ if mycpv_cps:
+ # cat/pkg-1.0
+ return 0
+
+ if (len(string.split(atom, '/'))==2):
+ # cat/pkg
+ return 1
+ else:
+ return 0
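+# Illustrative examples (not from the original source) of isvalidatom():
+#   isvalidatom(">=sys-apps/portage-2.0.51") ==> 1
+#   isvalidatom("sys-apps/portage")          ==> 1
+#   isvalidatom("sys-apps/portage-2.0.51")   ==> 0  (version but no operator)
+#   isvalidatom(">=portage-2.0.51")          ==> 0  (no category)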
+
+def isjustname(mypkg):
+ myparts=string.split(mypkg,'-')
+ for x in myparts:
+ if ververify(x):
+ return 0
+ return 1
+
+iscache={}
+def isspecific(mypkg):
+ "now supports packages with no category"
+ try:
+ return iscache[mypkg]
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ mysplit=string.split(mypkg,"/")
+ if not isjustname(mysplit[-1]):
+ iscache[mypkg]=1
+ return 1
+ iscache[mypkg]=0
+ return 0
+
+# This function can be used as a package verification function:
+# pkgsplit("foo-1.2-r1") will return None if foo-1.2-r1 isn't a valid
+# package (with version) name. If it is a valid name, pkgsplit will
+# return a list containing: [ pkgname, pkgversion(norev), pkgrev ].
+# For foo-1.2-r1, this list would be [ "foo", "1.2", "r1" ]. For
+# Mesa-3.0, this list would be [ "Mesa", "3.0", "r0" ].
+pkgcache={}
+
+def pkgsplit(mypkg,silent=1):
+ try:
+ if not pkgcache[mypkg]:
+ return None
+ return pkgcache[mypkg][:]
+ except KeyError:
+ pass
+ myparts=string.split(mypkg,'-')
+ if len(myparts)<2:
+ if not silent:
+ print "!!! Name error in",mypkg+": missing a version or name part."
+ pkgcache[mypkg]=None
+ return None
+ for x in myparts:
+ if len(x)==0:
+ if not silent:
+ print "!!! Name error in",mypkg+": empty \"-\" part."
+ pkgcache[mypkg]=None
+ return None
+ #verify rev
+ revok=0
+ myrev=myparts[-1]
+ if len(myrev) and myrev[0]=="r":
+ try:
+ int(myrev[1:])
+ revok=1
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ if revok:
+ if ververify(myparts[-2]):
+ if len(myparts)==2:
+ pkgcache[mypkg]=None
+ return None
+ else:
+ for x in myparts[:-2]:
+ if ververify(x):
+ pkgcache[mypkg]=None
+ return None
+ #names can't have versiony looking parts
+ myval=[string.join(myparts[:-2],"-"),myparts[-2],myparts[-1]]
+ pkgcache[mypkg]=myval
+ return myval
+ else:
+ pkgcache[mypkg]=None
+ return None
+
+ elif ververify(myparts[-1],silent=silent):
+ if len(myparts)==1:
+ if not silent:
+ print "!!! Name error in",mypkg+": missing name part."
+ pkgcache[mypkg]=None
+ return None
+ else:
+ for x in myparts[:-1]:
+ if ververify(x):
+ if not silent:
+ print "!!! Name error in",mypkg+": multiple version parts."
+ pkgcache[mypkg]=None
+ return None
+ myval=[string.join(myparts[:-1],"-"),myparts[-1],"r0"]
+ pkgcache[mypkg]=myval[:]
+ return myval
+ else:
+ pkgcache[mypkg]=None
+ return None
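+# Illustrative sketch (not from the original source) exercising pkgsplit();
+# the package names are hypothetical.
+def _pkgsplit_example():
+	assert pkgsplit("foo-1.2-r1") == ["foo", "1.2", "r1"]
+	assert pkgsplit("Mesa-3.0") == ["Mesa", "3.0", "r0"]
+	# "1" is not a valid revision part (revisions must be r<integer>):
+	assert pkgsplit("foo-1.2-1") is None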
+
+def getCPFromCPV(mycpv):
+ """Calls pkgsplit on a cpv and returns only the cp."""
+ return pkgsplit(mycpv)[0]
+
+catcache={}
+def catpkgsplit(mydata,silent=1):
+ "returns [cat, pkgname, version, rev ]"
+ try:
+ if not catcache[mydata]:
+ return None
+ return catcache[mydata][:]
+ except KeyError:
+ pass
+ mysplit=mydata.split("/")
+ p_split=None
+ if len(mysplit)==1:
+ retval=["null"]
+ p_split=pkgsplit(mydata,silent=silent)
+	elif len(mysplit)==2:
+		retval=[mysplit[0]]
+		p_split=pkgsplit(mysplit[1],silent=silent)
+	else:
+		# more than one "/" -- not a valid category/package string
+		catcache[mydata]=None
+		return None
+ if not p_split:
+ catcache[mydata]=None
+ return None
+ retval.extend(p_split)
+ catcache[mydata]=retval
+ return retval
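+# Illustrative examples (not from the original source) of catpkgsplit():
+#   catpkgsplit("sys-apps/portage-2.0.51-r2") ==> ["sys-apps","portage","2.0.51","r2"]
+#   catpkgsplit("portage-2.0.51")             ==> ["null","portage","2.0.51","r0"]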
+
+# vercmp:
+# This takes two version strings and returns an integer to tell you whether
+# the versions are the same, val1>val2 or val2>val1.
+vcmpcache={}
+def vercmp(val1,val2):
+ if val1==val2:
+ #quick short-circuit
+ return 0
+ valkey=val1+" "+val2
+ try:
+ return vcmpcache[valkey]
+ except KeyError:
+ pass
+ try:
+ return -vcmpcache[val2+" "+val1]
+ except KeyError:
+ pass
+
+ # consider 1_p2 vc 1.1
+ # after expansion will become (1_p2,0) vc (1,1)
+ # then 1_p2 is compared with 1 before 0 is compared with 1
+ # to solve the bug we need to convert it to (1,0_p2)
+ # by splitting _prepart part and adding it back _after_expansion
+ val1_prepart = val2_prepart = ''
+ if val1.count('_'):
+ val1, val1_prepart = val1.split('_', 1)
+ if val2.count('_'):
+ val2, val2_prepart = val2.split('_', 1)
+
+ # replace '-' by '.'
+ # FIXME: Is this needed? Can val1/val2 contain '-'?
+ val1=string.split(val1,'-')
+ if len(val1)==2:
+ val1[0]=val1[0]+"."+val1[1]
+ val2=string.split(val2,'-')
+ if len(val2)==2:
+ val2[0]=val2[0]+"."+val2[1]
+
+ val1=string.split(val1[0],'.')
+ val2=string.split(val2[0],'.')
+
+ #add back decimal point so that .03 does not become "3" !
+ for x in range(1,len(val1)):
+ if val1[x][0] == '0' :
+ val1[x]='.' + val1[x]
+ for x in range(1,len(val2)):
+ if val2[x][0] == '0' :
+ val2[x]='.' + val2[x]
+
+ # extend version numbers
+ if len(val2)<len(val1):
+ val2.extend(["0"]*(len(val1)-len(val2)))
+ elif len(val1)<len(val2):
+ val1.extend(["0"]*(len(val2)-len(val1)))
+
+ # add back _prepart tails
+ if val1_prepart:
+ val1[-1] += '_' + val1_prepart
+ if val2_prepart:
+ val2[-1] += '_' + val2_prepart
+ #val1 and val2 now have the same number of components; compare them
+ #component by component using relparse().
+ for x in range(0,len(val1)):
+ cmp1=relparse(val1[x])
+ cmp2=relparse(val2[x])
+ for y in range(0,4):
+ myret=cmp1[y]-cmp2[y]
+ if myret != 0:
+ vcmpcache[valkey]=myret
+ return myret
+ vcmpcache[valkey]=0
+ return 0
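+
+# vercmp() follows the usual comparison convention; e.g. (illustrative):
+#   vercmp("1.0","1.1")       < 0   (val1 is older)
+#   vercmp("1.1","1.0")       > 0   (val1 is newer)
+#   vercmp("1.0_p2","1.0_p2") == 0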
+
+
+def pkgcmp(pkg1,pkg2):
+ """Returns a value less than zero if pkg2 is newer than pkg1, zero if they are equal, and a value greater than zero if pkg1 is newer. Returns None if the package names differ."""
+ if pkg1[0] != pkg2[0]:
+ return None
+ mycmp=vercmp(pkg1[1],pkg2[1])
+ if mycmp>0:
+ return 1
+ if mycmp<0:
+ return -1
+ r1=int(pkg1[2][1:])
+ r2=int(pkg2[2][1:])
+ if r1>r2:
+ return 1
+ if r2>r1:
+ return -1
+ return 0
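+
+# pkgcmp() operates on pkgsplit() output; e.g. (illustrative):
+#   pkgcmp(["foo","1.0","r2"], ["foo","1.0","r1"]) ->  1
+#   pkgcmp(["foo","1.0","r1"], ["foo","1.1","r1"]) -> -1
+#   pkgcmp(["foo","1.0","r1"], ["bar","1.0","r1"]) -> None  (different names)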
+
+def dep_parenreduce(mysplit,mypos=0):
+ "Accepts a list of strings, and converts '(' and ')' surrounded items to sub-lists"
+ while (mypos<len(mysplit)):
+ if (mysplit[mypos]=="("):
+ firstpos=mypos
+ mypos=mypos+1
+ while (mypos<len(mysplit)):
+ if mysplit[mypos]==")":
+ mysplit[firstpos:mypos+1]=[mysplit[firstpos+1:mypos]]
+ mypos=firstpos
+ break
+ elif mysplit[mypos]=="(":
+ #recurse
+ mysplit=dep_parenreduce(mysplit,mypos=mypos)
+ mypos=mypos+1
+ mypos=mypos+1
+ return mysplit
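+
+# For illustration, dep_parenreduce() turns a flat token list into nested lists:
+#   dep_parenreduce(["a", "(", "b", "c", ")"]) -> ["a", ["b", "c"]]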
+
+def dep_opconvert(mysplit,myuse,mysettings):
+ "Does dependency operator conversion"
+
+ #check_config_instance(mysettings)
+
+ mypos=0
+ newsplit=[]
+ while mypos<len(mysplit):
+ if type(mysplit[mypos])==types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos],myuse,mysettings))
+ mypos += 1
+ elif mysplit[mypos]==")":
+ #mismatched paren, error
+ return None
+ elif mysplit[mypos]=="||":
+ if ((mypos+1)>=len(mysplit)) or (type(mysplit[mypos+1])!=types.ListType):
+ # || must be followed by paren'd list
+ return None
+ try:
+ mynew=dep_opconvert(mysplit[mypos+1],myuse,mysettings)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Unable to satisfy OR dependency:",string.join(mysplit," || ")
+ raise e
+ mynew[0:0]=["||"]
+ newsplit.append(mynew)
+ mypos += 2
+ elif mysplit[mypos][-1]=="?":
+ #uses clause, i.e "gnome? ( foo bar )"
+ #this is a quick and dirty hack so that repoman can enable all USE vars:
+ if (len(myuse)==1) and (myuse[0]=="*") and mysettings:
+ # enable it even if it's ! (for repoman) but kill it if it's
+ # an arch variable that isn't for this arch. XXX Sparc64?
+ k=mysplit[mypos][:-1]
+ if k[0]=="!":
+ k=k[1:]
+ if k not in archlist and k not in mysettings.usemask:
+ enabled=1
+ elif k in archlist:
+ if k==mysettings["ARCH"]:
+ if mysplit[mypos][0]=="!":
+ enabled=0
+ else:
+ enabled=1
+ elif mysplit[mypos][0]=="!":
+ enabled=1
+ else:
+ enabled=0
+ else:
+ enabled=0
+ else:
+ if mysplit[mypos][0]=="!":
+ myusevar=mysplit[mypos][1:-1]
+ if myusevar in myuse:
+ enabled=0
+ else:
+ enabled=1
+ else:
+ myusevar=mysplit[mypos][:-1]
+ if myusevar in myuse:
+ enabled=1
+ else:
+ enabled=0
+ if (mypos+2<len(mysplit)) and (mysplit[mypos+2]==":"):
+ #colon mode
+ if enabled:
+ #choose the first option
+ if type(mysplit[mypos+1])==types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos+1],myuse,mysettings))
+ else:
+ newsplit.append(mysplit[mypos+1])
+ else:
+ #choose the alternate option
+ if type(mysplit[mypos+1])==types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos+3],myuse,mysettings))
+ else:
+ newsplit.append(mysplit[mypos+3])
+ mypos += 4
+ else:
+ #normal use mode
+ if enabled:
+ if type(mysplit[mypos+1])==types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos+1],myuse,mysettings))
+ else:
+ newsplit.append(mysplit[mypos+1])
+ #otherwise, continue.
+ mypos += 2
+ else:
+ #normal item
+ newsplit.append(mysplit[mypos])
+ mypos += 1
+ return newsplit
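+
+# Sketch of dep_opconvert() on typical token streams (illustrative):
+#   dep_opconvert(["||", ["a", "b"]], myuse, mysettings)      -> [["||", "a", "b"]]
+#   dep_opconvert(["gnome?", ["foo"]], ["gnome"], mysettings) -> [["foo"]]
+#   dep_opconvert(["gnome?", ["foo"]], [], mysettings)        -> []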
+
+def dep_virtual(mysplit, mysettings):
+ "Does virtual dependency conversion"
+
+ newsplit=[]
+ for x in mysplit:
+ if type(x)==types.ListType:
+ newsplit.append(dep_virtual(x, mysettings))
+ else:
+ mykey=dep_getkey(x)
+ if mysettings.virtuals.has_key(mykey):
+ if len(mysettings.virtuals[mykey])==1:
+ a=string.replace(x, mykey, mysettings.virtuals[mykey][0])
+ else:
+ if x[0]=="!":
+ # blocker needs "and" not "or(||)".
+ a=[]
+ else:
+ a=['||']
+ for y in mysettings.virtuals[mykey]:
+ a.append(string.replace(x, mykey, y))
+ newsplit.append(a)
+ else:
+ newsplit.append(x)
+ return newsplit
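+
+# Sketch, assuming a hypothetical mysettings.virtuals mapping:
+#   with virtuals == {"virtual/x11": ["x11-base/xorg-x11"]}:
+#     dep_virtual(["virtual/x11"], mysettings) -> ["x11-base/xorg-x11"]
+#   with two providers the dep expands to an OR list:
+#     dep_virtual(["virtual/x11"], mysettings) -> [["||", "prov/one", "prov/two"]]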
+
+def dep_eval(deplist):
+ if len(deplist)==0:
+ return 1
+ if deplist[0]=="||":
+ #or list; we just need one "1"
+ for x in deplist[1:]:
+ if type(x)==types.ListType:
+ if dep_eval(x)==1:
+ return 1
+ elif x==1:
+ return 1
+ return 0
+ else:
+ for x in deplist:
+ if type(x)==types.ListType:
+ if dep_eval(x)==0:
+ return 0
+ elif x==0 or x==2:
+ return 0
+ return 1
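+
+# dep_eval() reduces a nested list of 0/1 tokens; e.g. (illustrative):
+#   dep_eval([1, 1])        -> 1
+#   dep_eval([1, 0])        -> 0
+#   dep_eval(["||", 0, 1])  -> 1   (an OR list needs only one satisfied entry)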
+
+def dep_zapdeps(unreduced,reduced,vardbapi=None,use_binaries=0):
+ """Takes an unreduced and reduced deplist and removes satisfied dependencies.
+ Returned deplist contains steps that must be taken to satisfy dependencies."""
+ writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
+ if unreduced==[] or unreduced==['||'] :
+ return []
+ if unreduced[0]=="||":
+ if dep_eval(reduced):
+ #deps satisfied, return empty list.
+ return []
+ else:
+ #try to find an installed dep.
+ ### We use a fakedb with --update now, so we can't use the local vardbapi here.
+ ### This should be fixed in the future.
+ ### see bug 45468.
+ ##if vardbapi:
+ ## mydbapi=vardbapi
+ ##else:
+ ## mydbapi=db[root]["vartree"].dbapi
+ mydbapi=db[root]["vartree"].dbapi
+
+ if db["/"].has_key("porttree"):
+ myportapi=db["/"]["porttree"].dbapi
+ else:
+ myportapi=None
+
+ if use_binaries and db["/"].has_key("bintree"):
+ mybinapi=db["/"]["bintree"].dbapi
+ writemsg("Using bintree...\n",2)
+ else:
+ mybinapi=None
+
+ x=1
+ candidate=[]
+ while x<len(reduced):
+ writemsg("x: %s, reduced[x]: %s\n" % (x,reduced[x]), 2)
+ if (type(reduced[x])==types.ListType):
+ newcand = dep_zapdeps(unreduced[x], reduced[x], vardbapi=vardbapi, use_binaries=use_binaries)
+ candidate.append(newcand)
+ else:
+ if (reduced[x]==False):
+ candidate.append([unreduced[x]])
+ else:
+ candidate.append([])
+ x+=1
+
+ #use installed and unmasked package(s) in portage.
+ for x in candidate:
+ match=1
+ for pkg in x:
+ if not mydbapi.match(pkg):
+ match=0
+ break
+ if myportapi:
+ if not myportapi.match(pkg):
+ match=0
+ break
+ if match:
+ writemsg("Installed match: %s\n" % (x), 2)
+ return x
+
+ # Use binary packages if available.
+ if mybinapi:
+ for x in candidate:
+ match=1
+ for pkg in x:
+ if not mybinapi.match(pkg):
+ match=0
+ break
+ else:
+ writemsg("Binary match: %s\n" % (pkg), 2)
+ if match:
+ writemsg("Binary match final: %s\n" % (x), 2)
+ return x
+
+ #use unmasked package(s) in the portage tree
+ if myportapi:
+ for x in candidate:
+ match=1
+ for pkg in x:
+ if not myportapi.match(pkg):
+ match=0
+ break
+ if match:
+ writemsg("Porttree match: %s\n" % (x), 2)
+ return x
+
+ #none of the candidates is unmasked; use the first one
+ writemsg("Last resort candidate: %s\n" % (candidate[0]), 2)
+ return candidate[0]
+ else:
+ if dep_eval(reduced):
+ #deps satisfied, return empty list.
+ return []
+ else:
+ returnme=[]
+ x=0
+ while x<len(reduced):
+ if type(reduced[x])==types.ListType:
+ returnme+=dep_zapdeps(unreduced[x],reduced[x], vardbapi=vardbapi, use_binaries=use_binaries)
+ else:
+ if reduced[x]==False:
+ returnme.append(unreduced[x])
+ x += 1
+ return returnme
+
+def dep_getkey(mydep):
+ if not len(mydep):
+ return mydep
+ if mydep[0]=="*":
+ mydep=mydep[1:]
+ if mydep[-1]=="*":
+ mydep=mydep[:-1]
+ if mydep[0]=="!":
+ mydep=mydep[1:]
+ if mydep[:2] in [ ">=", "<=" ]:
+ mydep=mydep[2:]
+ elif mydep[:1] in "=<>~":
+ mydep=mydep[1:]
+ if isspecific(mydep):
+ mysplit=catpkgsplit(mydep)
+ if not mysplit:
+ return mydep
+ return mysplit[0]+"/"+mysplit[1]
+ else:
+ return mydep
+
+def dep_getcpv(mydep):
+ if not len(mydep):
+ return mydep
+ if mydep[0]=="*":
+ mydep=mydep[1:]
+ if mydep[-1]=="*":
+ mydep=mydep[:-1]
+ if mydep[0]=="!":
+ mydep=mydep[1:]
+ if mydep[:2] in [ ">=", "<=" ]:
+ mydep=mydep[2:]
+ elif mydep[:1] in "=<>~":
+ mydep=mydep[1:]
+ return mydep
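+
+# The two strippers above differ in what they keep (illustrative):
+#   dep_getkey(">=sys-apps/foo-1.0")  -> "sys-apps/foo"      (just the cat/pkg key)
+#   dep_getcpv(">=sys-apps/foo-1.0")  -> "sys-apps/foo-1.0"  (operator stripped only)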
+
+def cpv_getkey(mycpv):
+ myslash=mycpv.split("/")
+ mysplit=pkgsplit(myslash[-1])
+ mylen=len(myslash)
+ if mylen==2:
+ return myslash[0]+"/"+mysplit[0]
+ elif mylen==1:
+ return mysplit[0]
+ else:
+ return mysplit
+
+def key_expand(mykey,mydb=None,use_cache=1):
+ mysplit=mykey.split("/")
+ if len(mysplit)==1:
+ if mydb and type(mydb)==types.InstanceType:
+ for x in settings.categories:
+ if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
+ return x+"/"+mykey
+ if virts_p.has_key(mykey):
+ return(virts_p[mykey][0])
+ return "null/"+mykey
+ elif mydb:
+ if type(mydb)==types.InstanceType:
+ if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
+ return virts[mykey][0]
+ return mykey
+
+def cpv_expand(mycpv,mydb=None,use_cache=1):
+ """Given a string (packagename or virtual) expand it into a valid
+ cat/package string. Virtuals use the mydb to determine which provided
+ virtual is a valid choice and defaults to the first element when there
+ are no installed/available candidates."""
+ myslash=mycpv.split("/")
+ mysplit=pkgsplit(myslash[-1])
+ if len(myslash)>2:
+ # more than one "/" is an illegal case.
+ mysplit=[]
+ mykey=mycpv
+ elif len(myslash)==2:
+ if mysplit:
+ mykey=myslash[0]+"/"+mysplit[0]
+ else:
+ mykey=mycpv
+ if mydb:
+ writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
+ if type(mydb)==types.InstanceType:
+ if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
+ writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
+ mykey_orig = mykey[:]
+ for vkey in virts[mykey]:
+ if mydb.cp_list(vkey,use_cache=use_cache):
+ mykey = vkey
+ writemsg("virts chosen: %s\n" % (mykey), 1)
+ break
+ if mykey == mykey_orig:
+ mykey=virts[mykey][0]
+ writemsg("virts defaulted: %s\n" % (mykey), 1)
+ #we only perform virtual expansion if we are passed a dbapi
+ else:
+ #specific cpv, no category, ie. "foo-1.0"
+ if mysplit:
+ myp=mysplit[0]
+ else:
+ # "foo" ?
+ myp=mycpv
+ mykey=None
+ matches=[]
+ if mydb:
+ for x in settings.categories:
+ if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
+ matches.append(x+"/"+myp)
+ if (len(matches)>1):
+ raise ValueError, matches
+ elif matches:
+ mykey=matches[0]
+
+ if not mykey and type(mydb)!=types.ListType:
+ if virts_p.has_key(myp):
+ mykey=virts_p[myp][0]
+ #again, we only perform virtual expansion if we have a dbapi (not a list)
+ if not mykey:
+ mykey="null/"+myp
+ if mysplit:
+ if mysplit[2]=="r0":
+ return mykey+"-"+mysplit[1]
+ else:
+ return mykey+"-"+mysplit[1]+"-"+mysplit[2]
+ else:
+ return mykey
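+
+# Sketch (the result depends on what mydb actually contains; values here are
+# hypothetical):
+#   cpv_expand("portage-2.0.51", mydb=portdb) -> "sys-apps/portage-2.0.51"
+#   cpv_expand("nosuchpkg-1.0", mydb=portdb)  -> "null/nosuchpkg-1.0"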
+
+def dep_transform(mydep,oldkey,newkey):
+ origdep=mydep
+ if not len(mydep):
+ return mydep
+ if mydep[0]=="*":
+ mydep=mydep[1:]
+ prefix=""
+ postfix=""
+ if mydep[-1]=="*":
+ mydep=mydep[:-1]
+ postfix="*"
+ if mydep[:2] in [ ">=", "<=" ]:
+ prefix=mydep[:2]
+ mydep=mydep[2:]
+ elif mydep[:1] in "=<>~!":
+ prefix=mydep[:1]
+ mydep=mydep[1:]
+ if mydep==oldkey:
+ return prefix+newkey+postfix
+ else:
+ return origdep
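+
+# dep_transform() rewrites a dep only when the stripped dep is exactly the old
+# key (illustrative):
+#   dep_transform("=virtual/x11*", "virtual/x11", "x11-base/xorg-x11")
+#       -> "=x11-base/xorg-x11*"
+#   dep_transform(">=sys-apps/foo-1.0", "sys-apps/foo", "sys-apps/bar")
+#       -> ">=sys-apps/foo-1.0"  (unchanged: the stripped dep is not a bare key)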
+
+def dep_expand(mydep,mydb=None,use_cache=1):
+ if not len(mydep):
+ return mydep
+ if mydep[0]=="*":
+ mydep=mydep[1:]
+ prefix=""
+ postfix=""
+ if mydep[-1]=="*":
+ mydep=mydep[:-1]
+ postfix="*"
+ if mydep[:2] in [ ">=", "<=" ]:
+ prefix=mydep[:2]
+ mydep=mydep[2:]
+ elif mydep[:1] in "=<>~!":
+ prefix=mydep[:1]
+ mydep=mydep[1:]
+ return prefix+cpv_expand(mydep,mydb=mydb,use_cache=use_cache)+postfix
+
+def dep_check(depstring,mydbapi,mysettings,use="yes",mode=None,myuse=None,use_cache=1,use_binaries=0):
+ """Takes a depend string and parses the condition."""
+
+ #check_config_instance(mysettings)
+
+ if use=="all":
+ #enable everything (for repoman)
+ myusesplit=["*"]
+ elif use=="yes":
+ if myuse==None:
+ #default behavior
+ myusesplit = string.split(mysettings["USE"])
+ else:
+ myusesplit = myuse
+ # We've been given useflags to use.
+ #print "USE FLAGS PASSED IN."
+ #print myuse
+ #if "bindist" in myusesplit:
+ # print "BINDIST is set!"
+ #else:
+ # print "BINDIST NOT set."
+ else:
+ #we are being run by autouse(), don't consult USE vars yet.
+ # WE ALSO CANNOT USE SETTINGS
+ myusesplit=[]
+
+ #convert parenthesis to sublists
+ mysplit = portage_dep.paren_reduce(depstring)
+
+ if mysettings:
+ # XXX: use="all" is only used by repoman. Why would repoman checks want
+ # profile-masked USE flags to be enabled?
+ #if use=="all":
+ # mymasks=archlist[:]
+ #else:
+ mymasks=mysettings.usemask+archlist[:]
+
+ while mysettings["ARCH"] in mymasks:
+ del mymasks[mymasks.index(mysettings["ARCH"])]
+ mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,masklist=mymasks,matchall=(use=="all"),excludeall=[mysettings["ARCH"]])
+ else:
+ mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,matchall=(use=="all"))
+
+ # Do the || conversions
+ mysplit=portage_dep.dep_opconvert(mysplit)
+
+ #convert virtual dependencies to normal packages.
+ mysplit=dep_virtual(mysplit, mysettings)
+ #if mysplit==None, then we have a parse error (paren mismatch or misplaced ||)
+ #up until here, we haven't needed to look at the database tree
+
+ if mysplit==None:
+ return [0,"Parse Error (parentheses mismatch?)"]
+ elif mysplit==[]:
+ #dependencies were reduced to nothing
+ return [1,[]]
+ mysplit2=mysplit[:]
+ mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
+ if mysplit2==None:
+ return [0,"Invalid token"]
+
+ writemsg("\n\n\n", 1)
+ writemsg("mysplit: %s\n" % (mysplit), 1)
+ writemsg("mysplit2: %s\n" % (mysplit2), 1)
+ myeval=dep_eval(mysplit2)
+ writemsg("myeval: %s\n" % (myeval), 1)
+
+ if myeval:
+ return [1,[]]
+ else:
+ myzaps = dep_zapdeps(mysplit,mysplit2,vardbapi=mydbapi,use_binaries=use_binaries)
+ mylist = flatten(myzaps)
+ writemsg("myzaps: %s\n" % (myzaps), 1)
+ writemsg("mylist: %s\n" % (mylist), 1)
+ #remove duplicates
+ mydict={}
+ for x in mylist:
+ mydict[x]=1
+ writemsg("mydict: %s\n" % (mydict), 1)
+ return [1,mydict.keys()]
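+
+# dep_check() returns a two-element list (as the code above shows):
+#   [1, []]        -> dependencies already satisfied
+#   [1, [atoms]]   -> satisfiable; atoms that still need to be merged
+#   [0, "message"] -> parse or token error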
+
+def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
+ "Reduces the deplist to ones and zeros"
+ mypos=0
+ deplist=mydeplist[:]
+ while mypos<len(deplist):
+ if type(deplist[mypos])==types.ListType:
+ #recurse
+ deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
+ elif deplist[mypos]=="||":
+ pass
+ else:
+ mykey = dep_getkey(deplist[mypos])
+ if mysettings and mysettings.pprovideddict.has_key(mykey) and \
+ match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
+ deplist[mypos]=True
+ else:
+ if mode:
+ mydep=mydbapi.xmatch(mode,deplist[mypos])
+ else:
+ mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
+ if mydep!=None:
+ tmp=(len(mydep)>=1)
+ if deplist[mypos][0]=="!":
+ #tmp=not tmp
+ # This is ad-hoc code. We should rewrite this later. (See #52377)
+ # The reason is that portage now uses a fakedb with the --update option,
+ # so portage considers that a blocked package doesn't exist even when it does,
+ # and then #52377 happens.
+ # ==== start
+ # emerge checks whether it's a blocker or not, so we can always set tmp=False,
+ # but it's not clean..
+ tmp=False
+ # ==== end
+ deplist[mypos]=tmp
+ else:
+ #encountered invalid string
+ return None
+ mypos=mypos+1
+ return deplist
+
+def getmaskingreason(mycpv):
+ global portdb
+ mysplit = catpkgsplit(mycpv)
+ if not mysplit:
+ raise ValueError("invalid CPV: %s" % mycpv)
+ if not portdb.cpv_exists(mycpv):
+ raise KeyError("CPV %s does not exist" % mycpv)
+ mycp=mysplit[0]+"/"+mysplit[1]
+
+ if settings.pmaskdict.has_key(mycp):
+ for x in settings.pmaskdict[mycp]:
+ if mycpv in portdb.xmatch("match-all", x):
+ pmaskfile = open(settings["PORTDIR"]+"/profiles/package.mask")
+ comment = ""
+ l = "\n"
+ while len(l) > 0:
+ l = pmaskfile.readline()
+ if len(l) == 0:
+ pmaskfile.close()
+ return None
+ if l[0] == "#":
+ comment += l
+ elif l == "\n":
+ comment = ""
+ elif l.strip() == x:
+ pmaskfile.close()
+ return comment
+ pmaskfile.close()
+ return None
+
+def getmaskingstatus(mycpv):
+ global portdb
+ mysplit = catpkgsplit(mycpv)
+ if not mysplit:
+ raise ValueError("invalid CPV: %s" % mycpv)
+ if not portdb.cpv_exists(mycpv):
+ raise KeyError("CPV %s does not exist" % mycpv)
+ mycp=mysplit[0]+"/"+mysplit[1]
+
+ rValue = []
+
+ # profile checking
+ revmaskdict=settings.prevmaskdict
+ if revmaskdict.has_key(mycp):
+ for x in revmaskdict[mycp]:
+ if x[0]=="*":
+ myatom = x[1:]
+ else:
+ myatom = x
+ if not match_to_list(mycpv, [myatom]):
+ rValue.append("profile")
+ break
+
+ # package.mask checking
+ maskdict=settings.pmaskdict
+ unmaskdict=settings.punmaskdict
+ if maskdict.has_key(mycp):
+ for x in maskdict[mycp]:
+ if mycpv in portdb.xmatch("match-all", x):
+ unmask=0
+ if unmaskdict.has_key(mycp):
+ for z in unmaskdict[mycp]:
+ if mycpv in portdb.xmatch("match-all",z):
+ unmask=1
+ break
+ if unmask==0:
+ rValue.append("package.mask")
+
+ # keywords checking
+ mygroups = portdb.aux_get(mycpv, ["KEYWORDS"])[0].split()
+ pgroups=groups[:]
+ myarch = settings["ARCH"]
+ pkgdict = settings.pkeywordsdict
+
+ cp = dep_getkey(mycpv)
+ if pkgdict.has_key(cp):
+ matches = match_to_list(mycpv, pkgdict[cp].keys())
+ for match in matches:
+ pgroups.extend(pkgdict[cp][match])
+
+ kmask = "missing"
+
+ for keyword in pgroups:
+ if keyword in mygroups:
+ kmask=None
+
+ if kmask:
+ fallback = None
+ for gp in mygroups:
+ if gp=="*":
+ kmask=None
+ break
+ elif gp=="-*":
+ fallback="-*"
+ elif gp=="-"+myarch:
+ kmask="-"+myarch
+ break
+ elif gp=="~"+myarch:
+ kmask="~"+myarch
+ break
+ if kmask == "missing" and fallback:
+ kmask = fallback
+
+ if kmask:
+ rValue.append(kmask+" keyword")
+ return rValue
+
+def fixdbentries(old_value, new_value, dbdir):
+ """python replacement for the fixdbentries script, replaces old_value
+ with new_value for package names in files in dbdir."""
+ for myfile in [f for f in os.listdir(dbdir) if not f == "CONTENTS"]:
+ f = open(dbdir+"/"+myfile, "r")
+ mycontent = f.read()
+ f.close()
+ if not mycontent.count(old_value):
+ continue
+ old_value = re.escape(old_value)
+ mycontent = re.sub(old_value+"$", new_value, mycontent)
+ mycontent = re.sub(old_value+"(\\s)", new_value+"\\1", mycontent)
+ mycontent = re.sub(old_value+"(-[^a-zA-Z])", new_value+"\\1", mycontent)
+ mycontent = re.sub(old_value+"([^a-zA-Z0-9-])", new_value+"\\1", mycontent)
+ f = open(dbdir+"/"+myfile, "w")
+ f.write(mycontent)
+ f.close()
+
+class packagetree:
+ def __init__(self,virtual,clone=None):
+ if clone:
+ self.tree=clone.tree.copy()
+ self.populated=clone.populated
+ self.virtual=clone.virtual
+ self.dbapi=None
+ else:
+ self.tree={}
+ self.populated=0
+ self.virtual=virtual
+ self.dbapi=None
+
+ def resolve_key(self,mykey):
+ return key_expand(mykey,mydb=self.dbapi)
+
+ def dep_nomatch(self,mypkgdep):
+ mykey=dep_getkey(mypkgdep)
+ nolist=self.dbapi.cp_list(mykey)
+ mymatch=self.dbapi.match(mypkgdep)
+ if not mymatch:
+ return nolist
+ for x in mymatch:
+ if x in nolist:
+ nolist.remove(x)
+ return nolist
+
+ def depcheck(self,mycheck,use="yes",myusesplit=None):
+ return dep_check(mycheck,self.dbapi,settings,use=use,myuse=myusesplit)
+
+ def populate(self):
+ "populates the tree with values"
+ self.populated=1
+
+def best(mymatches):
+ "accepts None arguments; assumes matches are valid."
+ global bestcount
+ if mymatches==None:
+ return ""
+ if not len(mymatches):
+ return ""
+ bestmatch=mymatches[0]
+ p2=catpkgsplit(bestmatch)[1:]
+ for x in mymatches[1:]:
+ p1=catpkgsplit(x)[1:]
+ if pkgcmp(p1,p2)>0:
+ bestmatch=x
+ p2=catpkgsplit(bestmatch)[1:]
+ return bestmatch
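+
+# best() picks the highest version from a match list (illustrative):
+#   best(["sys-apps/foo-1.0", "sys-apps/foo-1.2"]) -> "sys-apps/foo-1.2"
+#   best([])   -> ""
+#   best(None) -> ""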
+
+def match_to_list(mypkg,mylist):
+ """(pkgname,list)
+ Searches list for entries that match the package.
+ """
+ matches=[]
+ for x in mylist:
+ if match_from_list(x,[mypkg]):
+ if x not in matches:
+ matches.append(x)
+ return matches
+
+def best_match_to_list(mypkg,mylist):
+ """(pkgname,list)
+ Returns the most specific entry (assumed to be the longest one)
+ that matches the package given.
+ """
+ # XXX Assumption is wrong sometimes.
+ maxlen = 0
+ bestm = None
+ for x in match_to_list(mypkg,mylist):
+ if len(x) > maxlen:
+ maxlen = len(x)
+ bestm = x
+ return bestm
+
+def catsplit(mydep):
+ return mydep.split("/", 1)
+
+def get_operator(mydep):
+ """
+ returns '~', '=', '>', '<', '=*', '>=', or '<='
+ """
+ if mydep[0] == "~":
+ operator = "~"
+ elif mydep[0] == "=":
+ if mydep[-1] == "*":
+ operator = "=*"
+ else:
+ operator = "="
+ elif mydep[0] in "><":
+ if len(mydep) > 1 and mydep[1] == "=":
+ operator = mydep[0:2]
+ else:
+ operator = mydep[0]
+ else:
+ operator = None
+
+ return operator
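+
+# Examples of the mapping implemented above (illustrative):
+#   get_operator("~sys-apps/foo-1.0")   -> "~"
+#   get_operator("=sys-apps/foo-1.0*")  -> "=*"
+#   get_operator(">=sys-apps/foo-1.0")  -> ">="
+#   get_operator("sys-apps/foo")        -> None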
+
+
+def match_from_list(mydep,candidate_list):
+ if mydep[0] == "!":
+ mydep = mydep[1:]
+
+ mycpv = dep_getcpv(mydep)
+ mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
+
+ if not mycpv_cps:
+ cat,pkg = catsplit(mycpv)
+ ver = None
+ rev = None
+ else:
+ cat,pkg,ver,rev = mycpv_cps
+ if mydep == mycpv:
+ raise KeyError, "Specific key requires an operator (%s) (try adding an '=')" % (mydep)
+
+ if ver and rev:
+ operator = get_operator(mydep)
+ if not operator:
+ writemsg("!!! Invalid atom: %s\n" % mydep)
+ return []
+ else:
+ operator = None
+
+ mylist = []
+
+ if operator == None:
+ for x in candidate_list:
+ xs = pkgsplit(x)
+ if xs == None:
+ if x != mycpv:
+ continue
+ elif xs[0] != mycpv:
+ continue
+ mylist.append(x)
+
+ elif operator == "=": # Exact match
+ if mycpv in candidate_list:
+ mylist = [mycpv]
+
+ elif operator == "=*": # glob match
+ # The old version ignored _tag suffixes... This one doesn't.
+ for x in candidate_list:
+ if x[0:len(mycpv)] == mycpv:
+ mylist.append(x)
+
+ elif operator == "~": # version, any revision, match
+ for x in candidate_list:
+ xs = catpkgsplit(x)
+ if xs[0:2] != mycpv_cps[0:2]:
+ continue
+ if xs[2] != ver:
+ continue
+ mylist.append(x)
+
+ elif operator in [">", ">=", "<", "<="]:
+ for x in candidate_list:
+ try:
+ result = pkgcmp(pkgsplit(x), [cat+"/"+pkg,ver,rev])
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("\nInvalid package name: %s\n" % x)
+ sys.exit(73)
+ if result == None:
+ continue
+ elif operator == ">":
+ if result > 0:
+ mylist.append(x)
+ elif operator == ">=":
+ if result >= 0:
+ mylist.append(x)
+ elif operator == "<":
+ if result < 0:
+ mylist.append(x)
+ elif operator == "<=":
+ if result <= 0:
+ mylist.append(x)
+ else:
+ raise KeyError, "Unknown operator: %s" % mydep
+ else:
+ raise KeyError, "Unknown operator: %s" % mydep
+
+ return mylist
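+
+# Sketch of match_from_list() against a candidate list (illustrative):
+#   match_from_list(">=sys-apps/foo-1.1",
+#                   ["sys-apps/foo-1.0", "sys-apps/foo-1.2"])
+#       -> ["sys-apps/foo-1.2"]
+#   match_from_list("=sys-apps/foo-1.0",
+#                   ["sys-apps/foo-1.0", "sys-apps/foo-1.2"])
+#       -> ["sys-apps/foo-1.0"]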
+
+
+def match_from_list_original(mydep,mylist):
+ """(dep,list)
+ Reduces the list down to those that fit the dep
+ """
+ mycpv=dep_getcpv(mydep)
+ if isspecific(mycpv):
+ cp_key=catpkgsplit(mycpv)
+ if cp_key==None:
+ return []
+ else:
+ cp_key=None
+ #Otherwise, this is a special call; we can only select out of the ebuilds in the given mylist
+ if (mydep[0]=="="):
+ if cp_key==None:
+ return []
+ if mydep[-1]=="*":
+ #example: "=sys-apps/foo-1.0*"
+ try:
+ #now, we grab the version of our dependency...
+ mynewsplit=string.split(cp_key[2],'.')
+ #split it...
+ mynewsplit[-1]=`int(mynewsplit[-1])+1`
+ #and increment the last digit of the version by one.
+ #We don't need to worry about _pre and friends because they're not supported with '*' deps.
+ new_v=string.join(mynewsplit,".")+"_alpha0"
+ #new_v will be used later in the code when we do our comparisons using pkgcmp()
+ except SystemExit, e:
+ raise
+ except:
+ #erp, error.
+ return []
+ mynodes=[]
+ cmp1=cp_key[1:]
+ cmp1[1]=cmp1[1]+"_alpha0"
+ cmp2=[cp_key[1],new_v,"r0"]
+ for x in mylist:
+ cp_x=catpkgsplit(x)
+ if cp_x==None:
+ #hrm, invalid entry. Continue.
+ continue
+ #skip entries in our list that do not have matching categories
+ if cp_key[0]!=cp_x[0]:
+ continue
+ # ok, categories match. Continue to next step.
+ if ((pkgcmp(cp_x[1:],cmp1)>=0) and (pkgcmp(cp_x[1:],cmp2)<0)):
+ # entry is >= the version specified in our dependency, and < the version in our dep + 1; add it:
+ mynodes.append(x)
+ return mynodes
+ else:
+ # Does our stripped key appear literally in our list? If so, we have a match; if not, we don't.
+ if mycpv in mylist:
+ return [mycpv]
+ else:
+ return []
+ elif (mydep[0]==">") or (mydep[0]=="<"):
+ if cp_key==None:
+ return []
+ if (len(mydep)>1) and (mydep[1]=="="):
+ cmpstr=mydep[0:2]
+ else:
+ cmpstr=mydep[0]
+ mynodes=[]
+ for x in mylist:
+ cp_x=catpkgsplit(x)
+ if cp_x==None:
+ #invalid entry; continue.
+ continue
+ if cp_key[0]!=cp_x[0]:
+ continue
+ if eval("pkgcmp(cp_x[1:],cp_key[1:])"+cmpstr+"0"):
+ mynodes.append(x)
+ return mynodes
+ elif mydep[0]=="~":
+ if cp_key==None:
+ return []
+ myrev=-1
+ for x in mylist:
+ cp_x=catpkgsplit(x)
+ if cp_x==None:
+ #invalid entry; continue
+ continue
+ if cp_key[0]!=cp_x[0]:
+ continue
+ if cp_key[2]!=cp_x[2]:
+ #if version doesn't match, skip it
+ continue
+ myint = int(cp_x[3][1:])
+ if myint > myrev:
+ myrev = myint
+ mymatch = x
+ if myrev == -1:
+ return []
+ else:
+ return [mymatch]
+ elif cp_key==None:
+ if mydep[0]=="!":
+ return []
+ #we check ! deps in emerge itself, so always returning [] is correct.
+ mynodes=[]
+ cp_key=mycpv.split("/")
+ for x in mylist:
+ cp_x=catpkgsplit(x)
+ if cp_x==None:
+ #invalid entry; continue
+ continue
+ if cp_key[0]!=cp_x[0]:
+ continue
+ if cp_key[1]!=cp_x[1]:
+ continue
+ mynodes.append(x)
+ return mynodes
+ else:
+ return []
+
+
+class portagetree:
+ def __init__(self,root="/",virtual=None,clone=None):
+ global portdb
+ if clone:
+ self.root=clone.root
+ self.portroot=clone.portroot
+ self.pkglines=clone.pkglines
+ else:
+ self.root=root
+ self.portroot=settings["PORTDIR"]
+ self.virtual=virtual
+ self.dbapi=portdb
+
+ def dep_bestmatch(self,mydep):
+ "compatibility method"
+ mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
+ if mymatch==None:
+ return ""
+ return mymatch
+
+ def dep_match(self,mydep):
+ "compatibility method"
+ mymatch=self.dbapi.xmatch("match-visible",mydep)
+ if mymatch==None:
+ return []
+ return mymatch
+
+ def exists_specific(self,cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallnodes(self):
+ """new behavior: these are all *unmasked* nodes. There may or may not be
+ masked packages available for the nodes in this list."""
+ return self.dbapi.cp_all()
+
+ def getname(self,pkgname):
+ "returns file location for this particular package (DEPRECATED)"
+ if not pkgname:
+ return ""
+ mysplit=string.split(pkgname,"/")
+ psplit=pkgsplit(mysplit[1])
+ return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
+
+ def resolve_specific(self,myspec):
+ cps=catpkgsplit(myspec)
+ if not cps:
+ return None
+ mykey=key_expand(cps[0]+"/"+cps[1],mydb=self.dbapi)
+ mykey=mykey+"-"+cps[2]
+ if cps[3]!="r0":
+ mykey=mykey+"-"+cps[3]
+ return mykey
+
+ def depcheck(self,mycheck,use="yes",myusesplit=None):
+ return dep_check(mycheck,self.dbapi,settings,use=use,myuse=myusesplit)
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+ return myslot
+
+
+class dbapi:
+ def __init__(self):
+ pass
+
+ def close_caches(self):
+ pass
+
+ def cp_list(self,cp,use_cache=1):
+ return
+
+ def aux_get(self,mycpv,mylist):
+ "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
+ 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
+ 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
+ raise NotImplementedError
+
+ def match(self,origdep,use_cache=1):
+ mydep=dep_expand(origdep,mydb=self)
+ mykey=dep_getkey(mydep)
+ mycat=mykey.split("/")[0]
+ return match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
+
+ def match2(self,mydep,mykey,mylist):
+ writemsg("DEPRECATED: dbapi.match2\n")
+ return match_from_list(mydep,mylist)
+
+ def counter_tick(self,myroot,mycpv=None):
+ return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
+
+ def get_counter_tick_core(self,myroot,mycpv=None):
+ return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
+
+ def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
+ "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
+ cpath=myroot+"var/cache/edb/counter"
+ changed=0
+ min_counter = 0
+ if mycpv:
+ mysplit = pkgsplit(mycpv)
+ for x in self.match(mysplit[0],use_cache=0):
+ # fixed bug #41062
+ if x==mycpv:
+ continue
+ try:
+ old_counter = long(self.aux_get(x,["COUNTER"])[0])
+ writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
+ except SystemExit, e:
+ raise
+ except:
+ old_counter = 0
+ writemsg("!!! BAD COUNTER in '%s'\n" % (x))
+ if old_counter > min_counter:
+ min_counter = old_counter
+
+ # We write our new counter value to a new file that gets moved into
+ # place to avoid filesystem corruption.
+ if os.path.exists(cpath):
+ cfile=open(cpath, "r")
+ try:
+ counter=long(cfile.readline())
+ except (ValueError,OverflowError):
+ try:
+ counter=long(commands.getoutput("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'"))
+ writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter)
+ changed=1
+ except (ValueError,OverflowError):
+ writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n")
+ writemsg("!!! corrected/normalized so that portage can operate properly.\n")
+ writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
+ sys.exit(2)
+ cfile.close()
+ else:
+ try:
+ counter=long(commands.getoutput("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'"))
+ writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter)
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("!!! Initializing global counter.\n")
+ counter=long(0)
+ changed=1
+
+ if counter < min_counter:
+ counter = min_counter+1000
+ changed = 1
+
+ if incrementing or changed:
+
+ #increment counter
+ counter += 1
+ # update new global counter file
+ newcpath=cpath+".new"
+ newcfile=open(newcpath,"w")
+ newcfile.write(str(counter))
+ newcfile.close()
+ # now move global counter file into place
+ os.rename(newcpath,cpath)
+ return counter
+
+ def invalidentry(self, mypath):
+ if re.search("portage_lockfile$",mypath):
+ if not os.environ.has_key("PORTAGE_MASTER_PID"):
+ writemsg("Lockfile removed: %s\n" % mypath, 1)
+ portage_locks.unlockfile((mypath,None,None))
+ else:
+ # Nothing we can do about it. We're probably sandboxed.
+ pass
+ elif re.search(".*/-MERGING-(.*)",mypath):
+ if os.path.exists(mypath):
+ writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n")
+ else:
+ writemsg("!!! Invalid db entry: %s\n" % mypath)
+
+
+
+class fakedbapi(dbapi):
+ "This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
+ def __init__(self):
+ self.cpvdict={}
+ self.cpdict={}
+
+ def cpv_exists(self,mycpv):
+ return self.cpvdict.has_key(mycpv)
+
+ def cp_list(self,mycp,use_cache=1):
+ if not self.cpdict.has_key(mycp):
+ return []
+ else:
+ return self.cpdict[mycp]
+
+ def cp_all(self):
+ returnme=[]
+ for x in self.cpdict.keys():
+ returnme.extend(self.cpdict[x])
+ return returnme
+
+ def cpv_inject(self,mycpv):
+ """Adds a cpv to the list of available packages."""
+ mycp=cpv_getkey(mycpv)
+ self.cpvdict[mycpv]=1
+ if not self.cpdict.has_key(mycp):
+ self.cpdict[mycp]=[]
+ if not mycpv in self.cpdict[mycp]:
+ self.cpdict[mycp].append(mycpv)
+
+ #def cpv_virtual(self,oldcpv,newcpv):
+ # """Maps a cpv to the list of available packages."""
+ # mycp=cpv_getkey(newcpv)
+ # self.cpvdict[newcpv]=1
+ # if not self.virtdict.has_key(mycp):
+ # self.virtdict[mycp]=[]
+ # if not mycpv in self.virtdict[mycp]:
+ # self.virtdict[mycp].append(oldcpv)
+ # cpv_remove(oldcpv)
+
+ def cpv_remove(self,mycpv):
+ """Removes a cpv from the list of available packages."""
+ mycp=cpv_getkey(mycpv)
+ if self.cpvdict.has_key(mycpv):
+ del self.cpvdict[mycpv]
+ if not self.cpdict.has_key(mycp):
+ return
+ while mycpv in self.cpdict[mycp]:
+ del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
+ if not len(self.cpdict[mycp]):
+ del self.cpdict[mycp]
+
+class bindbapi(fakedbapi):
+ def __init__(self,mybintree=None):
+ self.bintree = mybintree
+ self.cpvdict={}
+ self.cpdict={}
+
+ def aux_get(self,mycpv,wants):
+ mysplit = string.split(mycpv,"/")
+ mylist = []
+ tbz2name = mysplit[1]+".tbz2"
+ if self.bintree and not self.bintree.isremote(mycpv):
+ tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
+ for x in wants:
+ if self.bintree and self.bintree.isremote(mycpv):
+ # We use the cache for remote packages
+ if self.bintree.remotepkgs[tbz2name].has_key(x):
+ mylist.append(self.bintree.remotepkgs[tbz2name][x][:]) # [:] Copy String
+ else:
+ mylist.append("")
+ else:
+ myval = tbz2.getfile(x)
+ if myval == None:
+ myval = ""
+ else:
+ myval = string.join(myval.split(),' ')
+ mylist.append(myval)
+
+ return mylist
+
+
+cptot=0
+class vardbapi(dbapi):
+ def __init__(self,root,categories=None):
+ self.root = root[:]
+ #cache for category directory mtimes
+ self.mtdircache = {}
+ #cache for dependency checks
+ self.matchcache = {}
+ #cache for cp_list results
+ self.cpcache = {}
+ self.blockers = None
+ self.categories = copy.deepcopy(categories)
+
+ def cpv_exists(self,mykey):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ return os.path.exists(self.root+VDB_PATH+"/"+mykey)
+
+ def cpv_counter(self,mycpv):
+ "This method will grab the COUNTER. Returns a counter value."
+ cdir=self.root+VDB_PATH+"/"+mycpv
+ cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
+
+ # We write our new counter value to a new file that gets moved into
+ # place to avoid filesystem corruption on XFS (unexpected reboot).
+ corrupted=0
+ if os.path.exists(cpath):
+ cfile=open(cpath, "r")
+ try:
+ counter=long(cfile.readline())
+ except ValueError:
+ print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
+ counter=long(0)
+ corrupted=1
+ cfile.close()
+ elif os.path.exists(cdir):
+ mys = pkgsplit(mycpv)
+ myl = self.match(mys[0],use_cache=0)
+ print mys,myl
+ if len(myl) == 1:
+ try:
+ # Only one package... Counter doesn't matter.
+ myf = open(cpath, "w")
+ myf.write("1")
+ myf.flush()
+ myf.close()
+ counter = 1
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
+ writemsg("!!! unmerge this exact version.\n")
+ writemsg("!!! %s\n" % e)
+ sys.exit(1)
+ else:
+ writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
+ writemsg("!!! remerge the package.\n")
+ sys.exit(1)
+ else:
+ counter=long(0)
+ if corrupted:
+ newcpath=cpath+".new"
+ # update new global counter file
+ newcfile=open(newcpath,"w")
+ newcfile.write(str(counter))
+ newcfile.close()
+ # now move global counter file into place
+ os.rename(newcpath,cpath)
+ return counter
+
+ def cpv_inject(self,mycpv):
+ "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
+ os.makedirs(self.root+VDB_PATH+"/"+mycpv)
+ counter=db[self.root]["vartree"].dbapi.counter_tick(self.root,mycpv=mycpv)
+ # write local package counter so that emerge clean does the right thing
+ lcfile=open(self.root+VDB_PATH+"/"+mycpv+"/COUNTER","w")
+ lcfile.write(str(counter))
+ lcfile.close()
+
+ def isInjected(self,mycpv):
+ if self.cpv_exists(mycpv):
+ if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
+ return True
+ if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
+ return True
+ return False
+
+ def move_ent(self,mylist):
+ origcp=mylist[1]
+ newcp=mylist[2]
+ origmatches=self.match(origcp,use_cache=0)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ mycpsplit=catpkgsplit(mycpv)
+ mynewcpv=newcp+"-"+mycpsplit[2]
+ mynewcat=newcp.split("/")[0]
+ if mycpsplit[3]!="r0":
+ mynewcpv += "-"+mycpsplit[3]
+ mycpsplit_new = catpkgsplit(mynewcpv)
+ origpath=self.root+VDB_PATH+"/"+mycpv
+ if not os.path.exists(origpath):
+ continue
+ writemsg("@")
+ if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
+ #create the directory
+ os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
+ newpath=self.root+VDB_PATH+"/"+mynewcpv
+ if os.path.exists(newpath):
+ #dest already exists; keep this puppy where it is.
+ continue
+ spawn(MOVE_BINARY+" "+origpath+" "+newpath,settings, free=1)
+
+ # We need to rename the ebuild now.
+ old_eb_path = newpath+"/"+mycpsplit[1] +"-"+mycpsplit[2]
+ new_eb_path = newpath+"/"+mycpsplit_new[1]+"-"+mycpsplit[2]
+ if mycpsplit[3] != "r0":
+ old_eb_path += "-"+mycpsplit[3]
+ new_eb_path += "-"+mycpsplit[3]
+ if os.path.exists(old_eb_path+".ebuild"):
+ os.rename(old_eb_path+".ebuild", new_eb_path+".ebuild")
+
+ catfile=open(newpath+"/CATEGORY", "w")
+ catfile.write(mynewcat+"\n")
+ catfile.close()
+
+ dbdir = self.root+VDB_PATH
+ for catdir in listdir(dbdir):
+ catdir = dbdir+"/"+catdir
+ if os.path.isdir(catdir):
+ for pkgdir in listdir(catdir):
+ pkgdir = catdir+"/"+pkgdir
+ if os.path.isdir(pkgdir):
+ fixdbentries(origcp, newcp, pkgdir)
+
+ def move_slot_ent(self,mylist):
+ pkg=mylist[1]
+ origslot=mylist[2]
+ newslot=mylist[3]
+
+ origmatches=self.match(pkg,use_cache=0)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ origpath=self.root+VDB_PATH+"/"+mycpv
+ if not os.path.exists(origpath):
+ continue
+
+ slot=grabfile(origpath+"/SLOT");
+ if (not slot):
+ continue
+
+ if (slot[0]!=origslot):
+ continue
+
+ writemsg("s")
+ slotfile=open(origpath+"/SLOT", "w")
+ slotfile.write(newslot+"\n")
+ slotfile.close()
+
+ def cp_list(self,mycp,use_cache=1):
+ mysplit=mycp.split("/")
+ if mysplit[0] == '*':
+ mysplit[0] = mysplit[0][1:]
+ try:
+ mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
+ except OSError:
+ mystat=0
+ if use_cache and self.cpcache.has_key(mycp):
+ cpc=self.cpcache[mycp]
+ if cpc[0]==mystat:
+ return cpc[1]
+ list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
+
+ if (list==None):
+ return []
+ returnme=[]
+ for x in list:
+ if x[0] == '-':
+ #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
+ continue
+ ps=pkgsplit(x)
+ if not ps:
+ self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
+ continue
+ if len(mysplit) > 1:
+ if ps[0]==mysplit[1]:
+ returnme.append(mysplit[0]+"/"+x)
+ if use_cache:
+ self.cpcache[mycp]=[mystat,returnme]
+ elif self.cpcache.has_key(mycp):
+ del self.cpcache[mycp]
+ return returnme
+
+ def cpv_all(self,use_cache=1):
+ returnme=[]
+ basepath = self.root+VDB_PATH+"/"
+
+ mycats = self.categories
+ if mycats == None:
+ # XXX: CIRCULAR DEP! This helps backwards compat. --NJ (10 Sept 2004)
+ mycats = settings.categories
+
+ for x in mycats:
+ for y in listdir(basepath+x,EmptyOnError=1):
+ subpath = x+"/"+y
+ # -MERGING- should never be a cpv, nor should files.
+ if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
+ returnme += [subpath]
+ return returnme
+
+ def cp_all(self,use_cache=1):
+ mylist = self.cpv_all(use_cache=use_cache)
+ d={}
+ for y in mylist:
+ if y[0] == '*':
+ y = y[1:]
+ mysplit=catpkgsplit(y)
+ if not mysplit:
+ self.invalidentry(self.root+VDB_PATH+"/"+y)
+ continue
+ d[mysplit[0]+"/"+mysplit[1]] = None
+ return d.keys()
+
+ def checkblockers(self,origdep):
+ pass
+
+ def match(self,origdep,use_cache=1):
+ "caching match function"
+ mydep=dep_expand(origdep,mydb=self,use_cache=use_cache)
+ mykey=dep_getkey(mydep)
+ mycat=mykey.split("/")[0]
+ if not use_cache:
+ if self.matchcache.has_key(mycat):
+ del self.mtdircache[mycat]
+ del self.matchcache[mycat]
+ return match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
+ try:
+ curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
+ except SystemExit, e:
+ raise
+ except:
+ curmtime=0
+
+ if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
+ # clear cache entry
+ self.mtdircache[mycat]=curmtime
+ self.matchcache[mycat]={}
+ if not self.matchcache[mycat].has_key(mydep):
+ mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
+ self.matchcache[mycat][mydep]=mymatch
+ return self.matchcache[mycat][mydep][:]
+
+ def aux_get(self, mycpv, wants):
+ global auxdbkeys
+ results = []
+ if not self.cpv_exists(mycpv):
+ return []
+ for x in wants:
+ myfn = self.root+VDB_PATH+"/"+str(mycpv)+"/"+str(x)
+ if os.access(myfn,os.R_OK):
+ myf = open(myfn, "r")
+ myd = myf.read()
+ myf.close()
+ myd = re.sub("[\n\r\t]+"," ",myd)
+ myd = re.sub(" +"," ",myd)
+ myd = string.strip(myd)
+ else:
+ myd = ""
+ results.append(myd)
+ return results
+
+
+class vartree(packagetree):
+ "this tree will scan a var/db/pkg database located at root (passed to init)"
+ def __init__(self,root="/",virtual=None,clone=None,categories=None):
+ if clone:
+ self.root = clone.root[:]
+ self.dbapi = copy.deepcopy(clone.dbapi)
+ self.populated = 1
+ else:
+ self.root = root[:]
+ self.dbapi = vardbapi(self.root,categories=categories)
+ self.populated = 1
+
+ def zap(self,mycpv):
+ return
+
+ def inject(self,mycpv):
+ return
+
+ def get_provide(self,mycpv):
+ myprovides=[]
+ try:
+ mylines = grabfile(self.root+VDB_PATH+"/"+mycpv+"/PROVIDE")
+ if mylines:
+ myuse = grabfile(self.root+VDB_PATH+"/"+mycpv+"/USE")
+ myuse = string.split(string.join(myuse))
+ mylines = string.join(mylines)
+ mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
+ for myprovide in mylines:
+ mys = catpkgsplit(myprovide)
+ if not mys:
+ mys = string.split(myprovide, "/")
+ myprovides += [mys[0] + "/" + mys[1]]
+ return myprovides
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print
+ print "Check " + self.root+VDB_PATH+"/"+mycpv+"/PROVIDE and USE."
+ print "Possibly Invalid: " + str(mylines)
+ print "Exception: "+str(e)
+ print
+ return []
+
+ def get_all_provides(self):
+ myprovides = {}
+ for node in self.getallcpv():
+ for mykey in self.get_provide(node):
+ if myprovides.has_key(mykey):
+ myprovides[mykey] += [node]
+ else:
+ myprovides[mykey] = [node]
+ return myprovides
+
+ def dep_bestmatch(self,mydep,use_cache=1):
+ "compatibility method -- all matches, not just visible ones"
+ #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
+ mymatch=best(self.dbapi.match(dep_expand(mydep,mydb=self.dbapi),use_cache=use_cache))
+ if mymatch==None:
+ return ""
+ else:
+ return mymatch
+
+ def dep_match(self,mydep,use_cache=1):
+ "compatibility method -- we want to see all matches, not just visible ones"
+ #mymatch=match(mydep,self.dbapi)
+ mymatch=self.dbapi.match(mydep,use_cache=use_cache)
+ if mymatch==None:
+ return []
+ else:
+ return mymatch
+
+ def exists_specific(self,cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallcpv(self):
+ """temporary function, probably to be renamed --- Gets a list of all
+ category/package-versions installed on the system."""
+ return self.dbapi.cpv_all()
+
+ def getallnodes(self):
+ """new behavior: these are all *unmasked* nodes. There may or may not be
+ masked packages available for the nodes in this list."""
+ return self.dbapi.cp_all()
+
+ def exists_specific_cat(self,cpv,use_cache=1):
+ cpv=key_expand(cpv,mydb=self.dbapi,use_cache=use_cache)
+ a=catpkgsplit(cpv)
+ if not a:
+ return 0
+ mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
+ for x in mylist:
+ b=pkgsplit(x)
+ if not b:
+ self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
+ continue
+ if a[1]==b[0]:
+ return 1
+ return 0
+
+ def getebuildpath(self,fullpackage):
+ cat,package=fullpackage.split("/")
+ return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
+
+ def getnode(self,mykey,use_cache=1):
+ mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
+ if not mykey:
+ return []
+ mysplit=mykey.split("/")
+ mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
+ returnme=[]
+ for x in mydirlist:
+ mypsplit=pkgsplit(x)
+ if not mypsplit:
+ self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
+ continue
+ if mypsplit[0]==mysplit[1]:
+ appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
+ returnme.append(appendme)
+ return returnme
+
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot=string.join(grabfile(self.root+VDB_PATH+"/"+mycatpkg+"/SLOT"))
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+ return myslot
+
+ def hasnode(self,mykey,use_cache):
+ """Does the particular node (cat/pkg key) exist?"""
+ mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
+ mysplit=mykey.split("/")
+ mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
+ for x in mydirlist:
+ mypsplit=pkgsplit(x)
+ if not mypsplit:
+ self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
+ continue
+ if mypsplit[0]==mysplit[1]:
+ return 1
+ return 0
+
+ def populate(self):
+ self.populated=1
+
+# ----------------------------------------------------------------------------
+class eclass_cache:
+ """Maintains the cache information about eclasses used in ebuild."""
+ def __init__(self,porttree_root,settings):
+ self.porttree_root = porttree_root
+ self.settings = settings
+ self.depcachedir = self.settings.depcachedir[:]
+
+ self.dbmodule = self.settings.load_best_module("eclass_cache.dbmodule")
+
+ self.packages = {} # {"PV": {"eclass1": ["location", "_mtime_"]}}
+ self.eclasses = {} # {"Name": ["location","_mtime_"]}
+
+ # don't fool with porttree ordering unless you *ensure* that ebuild.sh's inherit
+ # ordering is *exactly* the same
+ self.porttrees=[self.porttree_root]
+ self.porttrees.extend(self.settings["PORTDIR_OVERLAY"].split())
+ #normalize the path now, so it's not required later.
+ self.porttrees = [os.path.normpath(x) for x in self.porttrees]
+ self.update_eclasses()
+
+ def close_caches(self):
+ for x in self.packages.keys():
+ for y in self.packages[x].keys():
+ try:
+ self.packages[x][y].sync()
+ self.packages[x][y].close()
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ writemsg("Exception when closing DB: %s: %s\n" % (Exception,e))
+ del self.packages[x][y]
+ del self.packages[x]
+
+ def flush_cache(self):
+ self.packages = {}
+ self.eclasses = {}
+ self.update_eclasses()
+
+ def update_eclasses(self):
+ self.eclasses = {}
+ for x in suffix_array(self.porttrees, "/eclass"):
+ if x and os.path.exists(x):
+ dirlist = listdir(x)
+ for y in dirlist:
+ if y[-len(".eclass"):]==".eclass":
+ try:
+ ys=y[:-len(".eclass")]
+ ymtime=os.stat(x+"/"+y)[stat.ST_MTIME]
+ except SystemExit, e:
+ raise
+ except:
+ continue
+ self.eclasses[ys] = [x, ymtime]
+
+ def setup_package(self, location, cat, pkg):
+ if not self.packages.has_key(location):
+ self.packages[location] = {}
+
+ if not self.packages[location].has_key(cat):
+ try:
+ self.packages[location][cat] = self.dbmodule(self.depcachedir+"/"+location, cat+"-eclass", [], uid, portage_gid)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("\n!!! Failed to open the dbmodule for eclass caching.\n")
+ writemsg("!!! Generally these are permission problems. Caught exception follows:\n")
+ writemsg("!!! "+str(e)+"\n")
+ writemsg("!!! Dirname: "+str(self.depcachedir+"/"+location)+"\n")
+ writemsg("!!! Basename: "+str(cat+"-eclass")+"\n\n")
+ sys.exit(123)
+
+ def sync(self, location, cat, pkg):
+ if self.packages[location].has_key(cat):
+ self.packages[location][cat].sync()
+
+ def update_package(self, location, cat, pkg, eclass_list):
+ self.setup_package(location, cat, pkg)
+ if not eclass_list:
+ return 1
+
+ data = {}
+ for x in eclass_list:
+ if x not in self.eclasses:
+ writemsg("Eclass '%s' does not exist for '%s'\n" % (x, cat+"/"+pkg))
+ return 0
+ data[x] = [self.eclasses[x][0],self.eclasses[x][1]]
+
+ self.packages[location][cat][pkg] = data
+ self.sync(location,cat,pkg)
+ return 1
+
+ def is_current(self, location, cat, pkg, eclass_list):
+ self.setup_package(location, cat, pkg)
+
+ if not eclass_list:
+ return 1
+
+ if not (self.packages[location][cat].has_key(pkg) and self.packages[location][cat][pkg] and eclass_list):
+ return 0
+
+ myp = self.packages[location][cat][pkg]
+ for x in eclass_list:
+ if not (x in self.eclasses and x in myp and myp[x] == self.eclasses[x]):
+ return 0
+
+ return 1
+
+# ----------------------------------------------------------------------------
+
+auxdbkeys=[
+ 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
+ 'PDEPEND', 'PROVIDE',
+ 'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
+ 'UNUSED_05', 'UNUSED_06', 'UNUSED_07', 'UNUSED_08',
+ ]
+auxdbkeylen=len(auxdbkeys)
+
+def close_portdbapi_caches():
+ for i in portdbapi.portdbapi_instances:
+ i.close_caches()
+
+class portdbapi(dbapi):
+ """this tree will scan a portage directory located at root (passed to init)"""
+ portdbapi_instances = []
+
+ def __init__(self,porttree_root,mysettings=None):
+ portdbapi.portdbapi_instances.append(self)
+ self.lock_held = 0
+
+ if mysettings:
+ self.mysettings = mysettings
+ else:
+ self.mysettings = config(clone=settings)
+
+ self.manifestVerifyLevel = None
+ self.manifestVerifier = None
+ self.manifestCache = {} # {location: [stat, md5]}
+ self.manifestMissingCache = []
+
+ if "gpg" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.EXISTS
+ if "strict" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.MARGINAL
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+ elif "severe" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.TRUSTED
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
+ else:
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+
+ #self.root=settings["PORTDIR"]
+ self.porttree_root = porttree_root
+
+ self.depcachedir = self.mysettings.depcachedir[:]
+
+ self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
+ if self.tmpfs and not os.path.exists(self.tmpfs):
+ self.tmpfs = None
+ if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
+ self.tmpfs = None
+ if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
+ self.tmpfs = None
+
+ self.eclassdb = eclass_cache(self.porttree_root, self.mysettings)
+
+ self.metadb = {}
+ self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
+
+ self.auxdb = {}
+ self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
+
+ #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
+ self.xcache={}
+ self.frozen=0
+
+ self.porttrees=[self.porttree_root]+self.mysettings["PORTDIR_OVERLAY"].split()
+
+ def close_caches(self):
+ for x in self.auxdb.keys():
+ for y in self.auxdb[x].keys():
+ self.auxdb[x][y].sync()
+ self.auxdb[x][y].close()
+ del self.auxdb[x][y]
+ del self.auxdb[x]
+ self.eclassdb.close_caches()
+
+ def flush_cache(self):
+ self.metadb = {}
+ self.auxdb = {}
+ self.eclassdb.flush_cache()
+
+ def finddigest(self,mycpv):
+ try:
+ mydig = self.findname2(mycpv)[0]
+ mydigs = string.split(mydig, "/")[:-1]
+ mydig = string.join(mydigs, "/")
+
+ mysplit = mycpv.split("/")
+ except SystemExit, e:
+ raise
+ except:
+ return ""
+ return mydig+"/files/digest-"+mysplit[-1]
+
+ def findname(self,mycpv):
+ return self.findname2(mycpv)[0]
+
+ def findname2(self,mycpv):
+ "returns file location for this particular package and in_overlay flag"
+ if not mycpv:
+ return "",0
+ mysplit=mycpv.split("/")
+
+ psplit=pkgsplit(mysplit[1])
+ ret=None
+ if psplit:
+ for x in self.porttrees:
+ # XXX Why are there errors here? XXX
+ try:
+ file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print
+ print "!!! Problem with determining the name/location of an ebuild."
+ print "!!! Please report this on IRC and bugs if you are not causing it."
+ print "!!! mycpv: ",mycpv
+ print "!!! mysplit:",mysplit
+ print "!!! psplit: ",psplit
+ print "!!! error: ",e
+ print
+ sys.exit(17)
+
+ if os.access(file, os.R_OK):
+ # when found
+ ret=[file, x]
+ if ret:
+ return ret[0], ret[1]
+
+ # when not found
+ return None, 0
+
+ def aux_get(self,mycpv,mylist,strict=0,metacachedir=None,debug=0):
+ "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
+ 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
+ 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
+ global auxdbkeys,auxdbkeylen
+
+ cat,pkg = string.split(mycpv, "/", 1)
+
+ if metacachedir:
+ if cat not in self.metadb:
+ self.metadb[cat] = self.metadbmodule(metacachedir,cat,auxdbkeys,uid,portage_gid)
+
+ myebuild, mylocation=self.findname2(mycpv)
+
+ if not myebuild:
+ writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv})
+ writemsg("!!! %s\n" % myebuild)
+ raise KeyError, "'%(cpv)s' at %(path)s" % {"cpv":mycpv,"path":myebuild}
+
+ myManifestPath = string.join(myebuild.split("/")[:-1],"/")+"/Manifest"
+ if "gpg" in self.mysettings.features:
+ try:
+ mys = portage_gpg.fileStats(myManifestPath)
+ if (myManifestPath in self.manifestCache) and \
+ (self.manifestCache[myManifestPath] == mys):
+ pass
+ elif self.manifestVerifier:
+ if not self.manifestVerifier.verify(myManifestPath):
+ # Verification failed the desired level.
+ raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
+
+ if ("severe" in self.mysettings.features) and \
+ (mys != portage_gpg.fileStats(myManifestPath)):
+ raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
+
+ except portage_exception.InvalidSignature, e:
+ if ("strict" in self.mysettings.features) or \
+ ("severe" in self.mysettings.features):
+ raise
+ writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
+ except portage_exception.MissingSignature, e:
+ if ("severe" in self.mysettings.features):
+ raise
+ if ("strict" in self.mysettings.features):
+ if myManifestPath not in self.manifestMissingCache:
+ writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
+ self.manifestMissingCache.insert(0,myManifestPath)
+ except (OSError,portage_exception.FileNotFound), e:
+ if ("strict" in self.mysettings.features) or \
+ ("severe" in self.mysettings.features):
+ raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
+ writemsg("!!! Manifest is missing or inaccessible: %(manifest)s\n" % {"manifest":myManifestPath})
+
+ if mylocation not in self.auxdb:
+ self.auxdb[mylocation] = {}
+
+ if not self.auxdb[mylocation].has_key(cat):
+ self.auxdb[mylocation][cat] = self.auxdbmodule(self.depcachedir+"/"+mylocation,cat,auxdbkeys,uid,portage_gid)
+
+ if os.access(myebuild, os.R_OK):
+ emtime=os.stat(myebuild)[stat.ST_MTIME]
+ else:
+ writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv})
+ writemsg("!!! %s\n" % myebuild)
+ raise KeyError
+
+ # when mylocation is not an overlay directory and metacachedir is set,
+ # we use the cache files, which usually live under /usr/portage/metadata/cache/.
+ if mylocation==self.mysettings["PORTDIR"] and metacachedir and self.metadb[cat].has_key(pkg):
+ metadata=self.metadb[cat][pkg]
+ self.eclassdb.update_package(mylocation,cat,pkg,metadata["INHERITED"].split())
+ self.auxdb[mylocation][cat][pkg] = metadata
+ self.auxdb[mylocation][cat].sync()
+ else:
+
+ try:
+ auxdb_is_valid = self.auxdb[mylocation][cat].has_key(pkg) and \
+ self.auxdb[mylocation][cat][pkg].has_key("_mtime_") and \
+ self.auxdb[mylocation][cat][pkg]["_mtime_"] == emtime
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ auxdb_is_valid = 0
+ writemsg("auxdb exception: [%(loc)s]: %(exception)s\n" % {"loc":mylocation+"::"+cat+"/"+pkg, "exception":str(e)})
+ if self.auxdb[mylocation][cat].has_key(pkg):
+ self.auxdb[mylocation][cat].del_key(pkg)
+ self.auxdb[mylocation][cat].sync()
+
+ writemsg("auxdb is valid: "+str(auxdb_is_valid)+" "+str(pkg)+"\n", 2)
+ if auxdb_is_valid:
+ doregen=0
+ else:
+ doregen=1
+
+ if doregen or not self.eclassdb.is_current(mylocation,cat,pkg,self.auxdb[mylocation][cat][pkg]["INHERITED"].split()):
+ writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
+ writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
+
+ if self.tmpfs:
+ mydbkey = self.tmpfs+"/aux_db_key_temp"
+ else:
+ mydbkey = self.depcachedir+"/aux_db_key_temp"
+
+ # XXX: Part of the gvisible hack/fix to prevent deadlock
+ # XXX: through doebuild. Need to isolate this somehow...
+ self.mysettings.reset()
+
+ if self.lock_held:
+ raise "Lock is already held by me?"
+ self.lock_held = 1
+ mylock = portage_locks.lockfile(mydbkey, wantnewlockfile=1)
+
+ if os.path.exists(mydbkey):
+ try:
+ os.unlink(mydbkey)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ portage_locks.unlockfile(mylock)
+ self.lock_held = 0
+ writemsg("Uncaught handled exception: %(exception)s\n" % {"exception":str(e)})
+ raise
+
+ myret=doebuild(myebuild,"depend","/",self.mysettings,dbkey=mydbkey)
+ if myret:
+ portage_locks.unlockfile(mylock)
+ self.lock_held = 0
+ #depend returned non-zero exit code...
+ writemsg(str(red("\naux_get():")+" (0) Error in "+mycpv+" ebuild. ("+str(myret)+")\n"
+ " Check for syntax error or corruption in the ebuild. (--debug)\n\n"))
+ raise KeyError
+
+ try:
+ mycent=open(mydbkey,"r")
+ os.unlink(mydbkey)
+ mylines=mycent.readlines()
+ mycent.close()
+ except SystemExit, e:
+ raise
+ except (IOError, OSError):
+ portage_locks.unlockfile(mylock)
+ self.lock_held = 0
+ writemsg(str(red("\naux_get():")+" (1) Error in "+mycpv+" ebuild.\n"
+ " Check for syntax error or corruption in the ebuild. (--debug)\n\n"))
+ raise KeyError
+ except Exception, e:
+ portage_locks.unlockfile(mylock)
+ self.lock_held = 0
+ writemsg("Uncaught handled exception: %(exception)s\n" % {"exception":str(e)})
+ raise
+
+ portage_locks.unlockfile(mylock)
+ self.lock_held = 0
+
+ mydata = {}
+ for x in range(0,len(mylines)):
+ if mylines[x][-1] == '\n':
+ mylines[x] = mylines[x][:-1]
+ mydata[auxdbkeys[x]] = mylines[x]
+ mydata["_mtime_"] = emtime
+
+ self.auxdb[mylocation][cat][pkg] = mydata
+ self.auxdb[mylocation][cat].sync()
+ if not self.eclassdb.update_package(mylocation, cat, pkg, mylines[auxdbkeys.index("INHERITED")].split()):
+ sys.exit(1)
+
+ #finally, we look at our internal cache entry and return the requested data.
+ mydata = self.auxdb[mylocation][cat][pkg]
+ returnme = []
+ for x in mylist:
+ if mydata.has_key(x):
+ returnme.append(mydata[x])
+ else:
+ returnme.append("")
+
+ return returnme
+
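+ # Illustrative aux_get() call, per the docstring above (hypothetical cpv
+ # and metadata values, not taken from a real tree):
+ #
+ #   slot, depend, homepage = portdb.aux_get("sys-apps/foo-1.0",
+ #           ["SLOT", "DEPEND", "HOMEPAGE"])
+ #   # -> ["0", ">=sys-libs/bar-1.0", "http://www.foo.com"]
+ #
+ # Keys absent from the cache entry come back as "", so callers should
+ # treat an empty string as "not set".
+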
+ def getfetchlist(self,mypkg,useflags=None,mysettings=None,all=0):
+ if mysettings == None:
+ mysettings = self.mysettings
+ try:
+ myuris = self.aux_get(mypkg,["SRC_URI"])[0]
+ except (IOError,KeyError):
+ print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
+ sys.exit(1)
+
+ useflags = string.split(mysettings["USE"])
+
+ myurilist = portage_dep.paren_reduce(myuris)
+ myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
+ newuris = flatten(myurilist)
+
+ myfiles = []
+ for x in newuris:
+ mya = os.path.basename(x)
+ if not mya in myfiles:
+ myfiles.append(mya)
+ return [newuris, myfiles]
+
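+ # getfetchlist() sketch (hypothetical SRC_URI): USE-conditional blocks are
+ # resolved by paren_reduce()/use_reduce() against the active flags, e.g.
+ #
+ #   SRC_URI="mirror://foo/foo-1.0.tar.gz doc? ( mirror://foo/foo-doc.tgz )"
+ #   myuris, myfiles = portdb.getfetchlist("sys-apps/foo-1.0")
+ #   # with USE="-doc" -> myfiles == ["foo-1.0.tar.gz"]
+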
+ def getfetchsizes(self,mypkg,useflags=None,debug=0):
+ # returns a filename:size dictionary of remaining downloads
+ mydigest=self.finddigest(mypkg)
+ mymd5s=digestParseFile(mydigest)
+ if not mymd5s:
+ if debug: print "[empty/missing/bad digest]: "+mypkg
+ return None
+ filesdict={}
+ if useflags == None:
+ myuris, myfiles = self.getfetchlist(mypkg,all=1)
+ else:
+ myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
+ #XXX: maybe this should be improved: take partial downloads
+ # into account? check md5sums?
+ for myfile in myfiles:
+ if debug and myfile not in mymd5s.keys():
+ print "[bad digest]: missing",myfile,"for",mypkg
+ elif myfile in mymd5s.keys():
+ distfile=settings["DISTDIR"]+"/"+myfile
+ if not os.access(distfile, os.R_OK):
+ filesdict[myfile]=int(mymd5s[myfile]["size"])
+ return filesdict
+
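+ # getfetchsizes() sketch (hypothetical digest data): if the package's
+ # digest lists foo-1.0.tar.gz at 123456 bytes and the file is not yet
+ # readable in DISTDIR, then
+ #
+ #   portdb.getfetchsizes("sys-apps/foo-1.0")
+ #
+ # returns {"foo-1.0.tar.gz": 123456}; files already present are omitted,
+ # and None signals an empty/missing/bad digest.
+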
+ def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
+ if not useflags:
+ if mysettings:
+ useflags = mysettings["USE"].split()
+ myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
+ mydigest = self.finddigest(mypkg)
+ mysums = digestParseFile(mydigest)
+
+ failures = {}
+ for x in myfiles:
+ if not mysums or x not in mysums:
+ ok = False
+ reason = "digest missing"
+ else:
+ ok,reason = portage_checksum.verify_all(self.mysettings["DISTDIR"]+"/"+x, mysums[x])
+ if not ok:
+ failures[x] = reason
+ if failures:
+ return False
+ return True
+
+ def getsize(self,mypkg,useflags=None,debug=0):
+ # returns the total size of remaining downloads
+ #
+ # we use getfetchsizes() now, so this function is obsolete
+ #
+ filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
+ if filesdict==None:
+ return "[empty/missing/bad digest]"
+ mysize=0
+ for myfile in filesdict.keys():
+ mysize+=filesdict[myfile]
+ return mysize
+
+ def cpv_exists(self,mykey):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ cps2=mykey.split("/")
+ cps=catpkgsplit(mykey,silent=0)
+ if not cps:
+ #invalid cat/pkg-v
+ return 0
+ if self.findname(cps[0]+"/"+cps2[1]):
+ return 1
+ else:
+ return 0
+
+ def cp_all(self):
+ "returns a list of all keys in our tree"
+ d={}
+ for x in self.mysettings.categories:
+ for oroot in self.porttrees:
+ for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1):
+ mykey=x+"/"+y
+ d[mykey] = None
+ l = d.keys()
+ l.sort()
+ return l
+
+ def p_list(self,mycp):
+ d={}
+ for oroot in self.porttrees:
+ for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
+ if x[-7:]==".ebuild":
+ d[x[:-7]] = None
+ return d.keys()
+
+ def cp_list(self,mycp,use_cache=1):
+ mysplit=mycp.split("/")
+ d={}
+ for oroot in self.porttrees:
+ for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
+ if x[-7:]==".ebuild":
+ d[mysplit[0]+"/"+x[:-7]] = None
+ return d.keys()
+
+ def freeze(self):
+ for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
+ self.xcache[x]={}
+ self.frozen=1
+
+ def melt(self):
+ self.xcache={}
+ self.frozen=0
+
+ def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
+ "caching match function; very trick stuff"
+ #if no updates are being made to the tree, we can consult our xcache...
+ if self.frozen:
+ try:
+ return self.xcache[level][origdep]
+ except KeyError:
+ pass
+
+ if not mydep:
+ #this stuff only runs on first call of xmatch()
+ #create mydep, mykey from origdep
+ mydep=dep_expand(origdep,mydb=self)
+ mykey=dep_getkey(mydep)
+
+ if level=="list-visible":
+ #a list of all visible packages, not called directly (just by xmatch())
+ #myval=self.visible(self.cp_list(mykey))
+ myval=self.gvisible(self.visible(self.cp_list(mykey)))
+ elif level=="bestmatch-visible":
+ #dep match -- best match of all visible packages
+ myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
+ #get all visible matches (from xmatch()), then choose the best one
+ elif level=="bestmatch-list":
+ #dep match -- find best match but restrict search to sublist
+ myval=best(match_from_list(mydep,mylist))
+ #no point in calling xmatch again since we're not caching list deps
+ elif level=="match-list":
+ #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
+ myval=match_from_list(mydep,mylist)
+ elif level=="match-visible":
+ #dep match -- find all visible matches
+ myval=match_from_list(mydep,self.xmatch("list-visible",None,mydep=mydep,mykey=mykey))
+ #get all visible packages, then get the matching ones
+ elif level=="match-all":
+ #match *all* visible *and* masked packages
+ myval=match_from_list(mydep,self.cp_list(mykey))
+ else:
+ print "ERROR: xmatch doesn't handle",level,"query!"
+ raise KeyError
+ if self.frozen and (level not in ["match-list","bestmatch-list"]):
+ self.xcache[level][mydep]=myval
+ return myval
+
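+ # xmatch() levels at a glance (hypothetical atom; actual results depend
+ # on the tree and on masking/keywords):
+ #
+ #   portdb.xmatch("match-all", ">=sys-apps/foo-1.0")      # masked too
+ #   portdb.xmatch("match-visible", ">=sys-apps/foo-1.0")  # visible only
+ #   portdb.xmatch("bestmatch-visible", ">=sys-apps/foo-1.0")
+ #   # -> single best visible cpv, e.g. "sys-apps/foo-1.2"
+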
+ def match(self,mydep,use_cache=1):
+ return self.xmatch("match-visible",mydep)
+
+ def visible(self,mylist):
+ """two functions in one. Accepts a list of cpv values and uses the package.mask *and*
+ packages file to remove invisible entries, returning remaining items. This function assumes
+ that all entries in mylist have the same category and package name."""
+ if (mylist==None) or (len(mylist)==0):
+ return []
+ newlist=mylist[:]
+ #first, we mask out packages in the package.mask file
+ mykey=newlist[0]
+ cpv=catpkgsplit(mykey)
+ if not cpv:
+ #invalid cat/pkg-v
+ print "visible(): invalid cat/pkg-v:",mykey
+ return []
+ mycp=cpv[0]+"/"+cpv[1]
+ maskdict=self.mysettings.pmaskdict
+ unmaskdict=self.mysettings.punmaskdict
+ if maskdict.has_key(mycp):
+ for x in maskdict[mycp]:
+ mymatches=self.xmatch("match-all",x)
+ if mymatches==None:
+ #error in package.mask file; print warning and continue:
+ print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
+ continue
+ for y in mymatches:
+ unmask=0
+ if unmaskdict.has_key(mycp):
+ for z in unmaskdict[mycp]:
+ mymatches_unmask=self.xmatch("match-all",z)
+ if y in mymatches_unmask:
+ unmask=1
+ break
+ if unmask==0:
+ try:
+ newlist.remove(y)
+ except ValueError:
+ pass
+
+ revmaskdict=self.mysettings.prevmaskdict
+ if revmaskdict.has_key(mycp):
+ for x in revmaskdict[mycp]:
+ #important: only match against the still-unmasked entries...
+ #notice how we pass "newlist" to the xmatch() call below....
+ #Without this, ~ deps in the packages files are broken.
+ mymatches=self.xmatch("match-list",x,mylist=newlist)
+ if mymatches==None:
+ #error in packages file; print warning and continue:
+ print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
+ continue
+ pos=0
+ while pos<len(newlist):
+ if newlist[pos] not in mymatches:
+ del newlist[pos]
+ else:
+ pos += 1
+ return newlist
+
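+ # visible() worked sketch (hypothetical atoms):
+ #
+ #   portdb.visible(["sys-apps/foo-1.0", "sys-apps/foo-1.1"])
+ #   # package.mask ">=sys-apps/foo-1.1", no unmask -> ["sys-apps/foo-1.0"]
+ #
+ # a profile "packages" entry for the cp then trims the survivors via the
+ # "match-list" pass above.
+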
+ def gvisible(self,mylist):
+ "strip out group-masked (not in current group) entries"
+ global groups
+ if mylist==None:
+ return []
+ newlist=[]
+
+ pkgdict = self.mysettings.pkeywordsdict
+ for mycpv in mylist:
+ #we need to update this next line when we have fully integrated the new db api
+ auxerr=0
+ try:
+ myaux=db["/"]["porttree"].dbapi.aux_get(mycpv, ["KEYWORDS"])
+ except (KeyError,IOError,TypeError):
+ continue
+ if not myaux[0]:
+ # KEYWORDS=""
+ #print "!!! No KEYWORDS for "+str(mycpv)+" -- Untested Status"
+ continue
+ mygroups=myaux[0].split()
+ pgroups=groups[:]
+ match=0
+ cp = dep_getkey(mycpv)
+ if pkgdict.has_key(cp):
+ matches = match_to_list(mycpv, pkgdict[cp].keys())
+ for atom in matches:
+ pgroups.extend(pkgdict[cp][atom])
+ for gp in mygroups:
+ if gp=="*":
+ writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv)
+ match=1
+ break
+ elif "-"+gp in pgroups:
+ match=0
+ break
+ elif gp in pgroups:
+ match=1
+ break
+ if match:
+ newlist.append(mycpv)
+ return newlist
+
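+ # gvisible() keyword sketch (hypothetical values):
+ #
+ #   KEYWORDS="~x86 ppc", pgroups == ["x86"]                  -> masked
+ #   package.keywords adds "~x86" -> pgroups == ["x86","~x86"] -> visible
+ #   "-x86" in pgroups -> the "x86" keyword is rejected outright
+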
+class binarytree(packagetree):
+ "this tree scans for a list of all packages available in PKGDIR"
+ def __init__(self,root,pkgdir,virtual=None,clone=None):
+
+ if clone:
+ # XXX This isn't cloning. It's an instance of the same thing.
+ self.root=clone.root
+ self.pkgdir=clone.pkgdir
+ self.dbapi=clone.dbapi
+ self.populated=clone.populated
+ self.tree=clone.tree
+ self.remotepkgs=clone.remotepkgs
+ self.invalids=clone.invalids
+ else:
+ self.root=root
+ #self.pkgdir=settings["PKGDIR"]
+ self.pkgdir=pkgdir
+ self.dbapi=bindbapi(self)
+ self.populated=0
+ self.tree={}
+ self.remotepkgs={}
+ self.invalids=[]
+
+ def move_ent(self,mylist):
+ if not self.populated:
+ self.populate()
+ origcp=mylist[1]
+ newcp=mylist[2]
+ mynewcat=newcp.split("/")[0]
+ origmatches=self.dbapi.cp_list(origcp)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+
+ mycpsplit=catpkgsplit(mycpv)
+ mynewcpv=newcp+"-"+mycpsplit[2]
+ if mycpsplit[3]!="r0":
+ mynewcpv += "-"+mycpsplit[3]
+
+ myoldpkg=mycpv.split("/")[1]
+ mynewpkg=mynewcpv.split("/")[1]
+
+ if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
+ writemsg("!!! Cannot update binary: Destination exists.\n")
+ writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n")
+ continue
+
+ tbz2path=self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
+ continue
+
+ #print ">>> Updating data in:",mycpv
+ sys.stdout.write("%")
+ sys.stdout.flush()
+ mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
+ mytbz2=xpak.tbz2(tbz2path)
+ mytbz2.decompose(mytmpdir, cleanup=1)
+
+ fixdbentries(origcp, newcp, mytmpdir)
+
+ catfile=open(mytmpdir+"/CATEGORY", "w")
+ catfile.write(mynewcat+"\n")
+ catfile.close()
+ try:
+ os.rename(mytmpdir+"/"+string.split(mycpv,"/")[1]+".ebuild", mytmpdir+"/"+string.split(mynewcpv, "/")[1]+".ebuild")
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+
+ mytbz2.recompose(mytmpdir, cleanup=1)
+
+ self.dbapi.cpv_remove(mycpv)
+ if (mynewpkg != myoldpkg):
+ os.rename(tbz2path,self.getname(mynewcpv))
+ self.dbapi.cpv_inject(mynewcpv)
+ return 1
+
+ def move_slot_ent(self,mylist,mytmpdir):
+ #mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
+ mytmpdir=mytmpdir+"/tbz2"
+ if not self.populated:
+ self.populate()
+ pkg=mylist[1]
+ origslot=mylist[2]
+ newslot=mylist[3]
+ origmatches=self.dbapi.match(pkg)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ mycpsplit=catpkgsplit(mycpv)
+ myoldpkg=mycpv.split("/")[1]
+ tbz2path=self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
+ continue
+
+ #print ">>> Updating data in:",mycpv
+ mytbz2=xpak.tbz2(tbz2path)
+ mytbz2.decompose(mytmpdir, cleanup=1)
+
+ slot=grabfile(mytmpdir+"/SLOT")
+ if (not slot):
+ continue
+
+ if (slot[0]!=origslot):
+ continue
+
+ sys.stdout.write("S")
+ sys.stdout.flush()
+
+ slotfile=open(mytmpdir+"/SLOT", "w")
+ slotfile.write(newslot+"\n")
+ slotfile.close()
+ mytbz2.recompose(mytmpdir, cleanup=1)
+ return 1
+
+ def update_ents(self,mybiglist,mytmpdir):
+ #XXX mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
+ if not self.populated:
+ self.populate()
+ for mycpv in self.dbapi.cp_all():
+ tbz2path=self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
+ continue
+ #print ">>> Updating binary data:",mycpv
+ writemsg("*")
+ mytbz2=xpak.tbz2(tbz2path)
+ mytbz2.decompose(mytmpdir,cleanup=1)
+ for mylist in mybiglist:
+ mylist=string.split(mylist)
+ if mylist[0] != "move":
+ continue
+ fixdbentries(mylist[1], mylist[2], mytmpdir)
+ mytbz2.recompose(mytmpdir,cleanup=1)
+ return 1
+
+ def populate(self, getbinpkgs=0,getbinpkgsonly=0):
+ "populates the binarytree"
+ if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
+ return 0
+ if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
+ return 0
+
+ if (not getbinpkgsonly) and os.path.exists(self.pkgdir+"/All"):
+ for mypkg in listdir(self.pkgdir+"/All"):
+ if mypkg[-5:]!=".tbz2":
+ continue
+ mytbz2=xpak.tbz2(self.pkgdir+"/All/"+mypkg)
+ mycat=mytbz2.getfile("CATEGORY")
+ if not mycat:
+ #old-style or corrupt package
+ writemsg("!!! Invalid binary package: "+mypkg+"\n")
+ self.invalids.append(mypkg)
+ continue
+ mycat=string.strip(mycat)
+ fullpkg=mycat+"/"+mypkg[:-5]
+ mykey=dep_getkey(fullpkg)
+ try:
+ # invalid tbz2's can hurt things.
+ self.dbapi.cpv_inject(fullpkg)
+ except SystemExit, e:
+ raise
+ except:
+ continue
+
+ if getbinpkgs and not settings["PORTAGE_BINHOST"]:
+ writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"))
+
+ if getbinpkgs and settings["PORTAGE_BINHOST"] and not self.remotepkgs:
+ try:
+ chunk_size = long(settings["PORTAGE_BINHOST_CHUNKSIZE"])
+ if chunk_size < 8:
+ chunk_size = 8
+ except SystemExit, e:
+ raise
+ except:
+ chunk_size = 3000
+
+ writemsg(green("Fetching binary packages info...\n"))
+ self.remotepkgs = getbinpkg.dir_get_metadata(settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
+ writemsg(green(" -- DONE!\n\n"))
+
+ for mypkg in self.remotepkgs.keys():
+ if not self.remotepkgs[mypkg].has_key("CATEGORY"):
+ #old-style or corrupt package
+ writemsg("!!! Invalid remote binary package: "+mypkg+"\n")
+ del self.remotepkgs[mypkg]
+ continue
+ mycat=string.strip(self.remotepkgs[mypkg]["CATEGORY"])
+ fullpkg=mycat+"/"+mypkg[:-5]
+ mykey=dep_getkey(fullpkg)
+ try:
+ # invalid tbz2's can hurt things.
+ #print "cpv_inject("+str(fullpkg)+")"
+ self.dbapi.cpv_inject(fullpkg)
+ #print " -- Injected"
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n")
+ del self.remotepkgs[mypkg]
+ continue
+ self.populated=1
+
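+ # populate() sketch: the local pass scans PKGDIR/All/*.tbz2 and injects
+ # each cpv read from the xpak CATEGORY chunk; with getbinpkgs set and,
+ # e.g., PORTAGE_BINHOST="http://binhost.example.org/All" (hypothetical
+ # URL), the remote index from getbinpkg.dir_get_metadata() is merged in.
+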
+ def inject(self,cpv):
+ return self.dbapi.cpv_inject(cpv)
+
+ def exists_specific(self,cpv):
+ if not self.populated:
+ self.populate()
+ return self.dbapi.match(dep_expand("="+cpv,mydb=self.dbapi))
+
+ def dep_bestmatch(self,mydep):
+ "compatibility method -- all matches, not just visible ones"
+ if not self.populated:
+ self.populate()
+ writemsg("\n\n", 1)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mydep=dep_expand(mydep,mydb=self.dbapi)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mykey=dep_getkey(mydep)
+ writemsg("mykey: %s\n" % mykey, 1)
+ mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
+ writemsg("mymatch: %s\n" % mymatch, 1)
+ if mymatch==None:
+ return ""
+ return mymatch
+
+ def getname(self,pkgname):
+ "returns file location for this particular package"
+ mysplit=string.split(pkgname,"/")
+ if len(mysplit)==1:
+ return self.pkgdir+"/All/"+self.resolve_specific(pkgname)+".tbz2"
+ else:
+ return self.pkgdir+"/All/"+mysplit[1]+".tbz2"
+
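+ # getname() mapping sketch (hypothetical PKGDIR): with
+ # pkgdir="/usr/portage/packages", both "foo-1.0" and "sys-apps/foo-1.0"
+ # resolve to "/usr/portage/packages/All/foo-1.0.tbz2"; the category is
+ # not encoded in the on-disk tbz2 name.
+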
+ def isremote(self,pkgname):
+ "Returns true if the package is kept remotely."
+ mysplit=string.split(pkgname,"/")
+ remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
+ return remote
+
+ def get_use(self,pkgname):
+ mysplit=string.split(pkgname,"/")
+ if self.isremote(pkgname):
+ return string.split(self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:])
+ tbz2=xpak.tbz2(self.getname(pkgname))
+ return string.split(tbz2.getfile("USE"))
+
+ def gettbz2(self,pkgname):
+ "fetches the package from a remote site, if necessary."
+ print "Fetching '"+str(pkgname)+"'"
+ mysplit = string.split(pkgname,"/")
+ tbz2name = mysplit[1]+".tbz2"
+ if not self.isremote(pkgname):
+ if (tbz2name not in self.invalids):
+ return
+ else:
+ writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n")
+ mydest = self.pkgdir+"/All/"
+ try:
+ os.makedirs(mydest, 0775)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ getbinpkg.file_get(settings["PORTAGE_BINHOST"]+"/"+tbz2name, mydest, fcmd=settings["RESUMECOMMAND"])
+ return
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+ return myslot
+
+class dblink:
+ "this class provides an interface to the standard text package database"
+ def __init__(self,cat,pkg,myroot,mysettings):
+ "create a dblink object for cat/pkg. This dblink entry may or may not exist"
+ self.cat = cat
+ self.pkg = pkg
+ self.mycpv = self.cat+"/"+self.pkg
+ self.mysplit = pkgsplit(self.mycpv)
+
+ self.dbroot = os.path.normpath(myroot+VDB_PATH)
+ self.dbcatdir = self.dbroot+"/"+cat
+ self.dbpkgdir = self.dbcatdir+"/"+pkg
+ self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
+ self.dbdir = self.dbpkgdir
+
+ self.lock_pkg = None
+ self.lock_tmp = None
+ self.lock_num = 0 # Count of the held locks on the db.
+
+ self.settings = mysettings
+ if self.settings==1:
+ raise ValueError
+
+ self.myroot=myroot
+ self.updateprotect()
+ self.contentscache=[]
+
+ def lockdb(self):
+ if self.lock_num == 0:
+ self.lock_pkg = portage_locks.lockdir(self.dbpkgdir)
+ self.lock_tmp = portage_locks.lockdir(self.dbtmpdir)
+ self.lock_num += 1
+
+ def unlockdb(self):
+ self.lock_num -= 1
+ if self.lock_num == 0:
+ portage_locks.unlockdir(self.lock_tmp)
+ portage_locks.unlockdir(self.lock_pkg)
+
+ def getpath(self):
+ "return path to location of db information (for >>> informational display)"
+ return self.dbdir
+
+ def exists(self):
+ "does the db entry exist? boolean."
+ return os.path.exists(self.dbdir)
+
+ def create(self):
+ "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
+ # XXXXX Delete this eventually
+ raise Exception, "This is bad. Don't use it."
+ if not os.path.exists(self.dbdir):
+ os.makedirs(self.dbdir)
+
+ def delete(self):
+ "erase this db entry completely"
+ if not os.path.exists(self.dbdir):
+ return
+ try:
+ for x in listdir(self.dbdir):
+ os.unlink(self.dbdir+"/"+x)
+ os.rmdir(self.dbdir)
+ except OSError, e:
+ print "!!! Unable to remove db entry for this package."
+ print "!!! It is possible that a directory is in this one. Portage will still"
+ print "!!! register this package as installed as long as this directory exists."
+ print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
+ print "!!! "+str(e)
+ print
+ sys.exit(1)
+
+ def clearcontents(self):
+ if os.path.exists(self.dbdir+"/CONTENTS"):
+ os.unlink(self.dbdir+"/CONTENTS")
+
+ def getcontents(self):
+ if not os.path.exists(self.dbdir+"/CONTENTS"):
+ return None
+ if self.contentscache != []:
+ return self.contentscache
+ pkgfiles={}
+ myc=open(self.dbdir+"/CONTENTS","r")
+ mylines=myc.readlines()
+ myc.close()
+ pos=1
+ for line in mylines:
+ mydat = string.split(line)
+ # we do this so we can remove from non-root filesystems
+ # (use the ROOT var to allow maintenance on other partitions)
+ try:
+ mydat[1]=os.path.normpath(root+mydat[1][1:])
+ if mydat[0]=="obj":
+ #format: type, mtime, md5sum
+ pkgfiles[string.join(mydat[1:-2]," ")]=[mydat[0], mydat[-1], mydat[-2]]
+ elif mydat[0]=="dir":
+ #format: type
+ pkgfiles[string.join(mydat[1:])]=[mydat[0] ]
+ elif mydat[0]=="sym":
+ #format: type, mtime, dest
+ x=len(mydat)-1
+ if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
+ mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
+ writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
+ x=len(mydat)-1
+ splitter=-1
+ while(x>=0):
+ if mydat[x]=="->":
+ splitter=x
+ break
+ x=x-1
+ if splitter==-1:
+ return None
+ pkgfiles[string.join(mydat[1:splitter]," ")]=[mydat[0], mydat[-1], string.join(mydat[(splitter+1):-1]," ")]
+ elif mydat[0]=="dev":
+ #format: type
+ pkgfiles[string.join(mydat[1:]," ")]=[mydat[0] ]
+ elif mydat[0]=="fif":
+ #format: type
+ pkgfiles[string.join(mydat[1:]," ")]=[mydat[0]]
+ else:
+ return None
+ except (KeyError,IndexError):
+ print "portage: CONTENTS line",pos,"corrupt!"
+ pos += 1
+ self.contentscache=pkgfiles
+ return pkgfiles
+
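+ # CONTENTS line formats handled above (hypothetical paths and checksums):
+ #
+ #   dir /usr/bin
+ #   obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1105299000
+ #   sym /usr/lib/libfoo.so -> libfoo.so.1 1105299000
+ #   fif /var/run/foo.fifo
+ #   dev /dev/foo
+ #
+ # "obj" lines carry md5 then mtime; "sym" lines carry the link target
+ # after "->", followed by the mtime.
+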
+ def updateprotect(self):
+ #do some config file management prep
+ self.protect=[]
+ for x in string.split(self.settings["CONFIG_PROTECT"]):
+ ppath=normalize_path(self.myroot+x)+"/"
+ if os.path.isdir(ppath):
+ self.protect.append(ppath)
+
+ self.protectmask=[]
+ for x in string.split(self.settings["CONFIG_PROTECT_MASK"]):
+ ppath=normalize_path(self.myroot+x)+"/"
+ if os.path.isdir(ppath):
+ self.protectmask.append(ppath)
+ #if it doesn't exist, silently skip it
+
+ def isprotected(self,obj):
+ """Checks if obj is in the current protect/mask directories. Returns
+ 0 on unprotected/masked, and 1 on protected."""
+ masked=0
+ protected=0
+ for ppath in self.protect:
+ if (len(ppath) > masked) and (obj[0:len(ppath)]==ppath):
+ protected=len(ppath)
+ #config file management
+ for pmpath in self.protectmask:
+ if (len(pmpath) >= protected) and (obj[0:len(pmpath)]==pmpath):
+ #skip, it's in the mask
+ masked=len(pmpath)
+ return (protected > masked)
+
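+ # isprotected() sketch (hypothetical paths): with CONFIG_PROTECT="/etc"
+ # and CONFIG_PROTECT_MASK="/etc/env.d", "/etc/foo.conf" is protected,
+ # while "/etc/env.d/10foo" is not, because the matching mask prefix is
+ # at least as long as the matching protect prefix.
+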
+ def unmerge(self,pkgfiles=None,trimworld=1,cleanup=0):
+ global dircache
+ dircache={}
+
+ self.lockdb()
+
+ self.settings.load_infodir(self.dbdir)
+
+ if not pkgfiles:
+ print "No package files given... Grabbing a set."
+ pkgfiles=self.getcontents()
+
+ # Now, don't assume that the name of the ebuild is the same as the
+ # name of the dir; the package may have been moved.
+ myebuildpath=None
+
+ # We should use the environment file if possible,
+ # as it has all sourced files already included.
+ # XXX: Need to ensure it doesn't overwrite any important vars though.
+ if os.access(self.dbdir+"/environment.bz2", os.R_OK):
+ spawn("bzip2 -d "+self.dbdir+"/environment.bz2",self.settings,free=1)
+
+ if not myebuildpath:
+ mystuff=listdir(self.dbdir,EmptyOnError=1)
+ for x in mystuff:
+ if x[-7:]==".ebuild":
+ myebuildpath=self.dbdir+"/"+x
+ break
+
+ #do prerm script
+ if myebuildpath and os.path.exists(myebuildpath):
+ a=doebuild(myebuildpath,"prerm",self.myroot,self.settings,cleanup=cleanup,use_cache=0,tree="vartree")
+ # XXX: Decide how to handle failures here.
+ if a != 0:
+ writemsg("!!! FAILED prerm: "+str(a)+"\n")
+ sys.exit(123)
+
+ if pkgfiles:
+ mykeys=pkgfiles.keys()
+ mykeys.sort()
+ mykeys.reverse()
+
+ self.updateprotect()
+
+ #process symlinks second-to-last, directories last.
+ mydirs=[]
+ mysyms=[]
+ modprotect="/lib/modules/"
+ for obj in mykeys:
+ obj=os.path.normpath(obj)
+ if obj[:2]=="//":
+ obj=obj[1:]
+ if not os.path.exists(obj):
+ if not os.path.islink(obj):
+ #we skip this if we're dealing with a symlink
+ #because os.path.exists() will operate on the
+ #link target rather than the link itself.
+ print "--- !found "+str(pkgfiles[obj][0]), obj
+ continue
+ # next line includes a tweak to protect modules from being unmerged,
+ # but we don't protect modules from being overwritten if they are
+ # upgraded. We effectively only want one half of the config protection
+ # functionality for /lib/modules. For portage-ng both capabilities
+ # should be able to be independently specified.
+ if self.isprotected(obj) or ((len(obj) > len(modprotect)) and (obj[0:len(modprotect)]==modprotect)):
+ print "--- cfgpro "+str(pkgfiles[obj][0]), obj
+ continue
+
+ lstatobj=os.lstat(obj)
+ lmtime=str(lstatobj[stat.ST_MTIME])
+ if (pkgfiles[obj][0] not in ("dir","fif","dev","sym")) and (lmtime != pkgfiles[obj][1]):
+ print "--- !mtime", pkgfiles[obj][0], obj
+ continue
+
+ if pkgfiles[obj][0]=="dir":
+ if not os.path.isdir(obj):
+ print "--- !dir ","dir", obj
+ continue
+ mydirs.append(obj)
+ elif pkgfiles[obj][0]=="sym":
+ if not os.path.islink(obj):
+ print "--- !sym ","sym", obj
+ continue
+ mysyms.append(obj)
+ elif pkgfiles[obj][0]=="obj":
+ if not os.path.isfile(obj):
+ print "--- !obj ","obj", obj
+ continue
+ mymd5=portage_checksum.perform_md5(obj, calc_prelink=1)
+
+ # string.lower is needed because db entries used to be in upper-case. The
+ # string.lower allows for backwards compatibility.
+ if mymd5 != string.lower(pkgfiles[obj][2]):
+ print "--- !md5 ","obj", obj
+ continue
+ try:
+ os.unlink(obj)
+ except (OSError,IOError),e:
+ pass
+ print "<<< ","obj",obj
+ elif pkgfiles[obj][0]=="fif":
+ if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
+ print "--- !fif ","fif", obj
+ continue
+ try:
+ os.unlink(obj)
+ except (OSError,IOError),e:
+ pass
+ print "<<< ","fif",obj
+ elif pkgfiles[obj][0]=="dev":
+ print "--- ","dev",obj
+
+ #Now, we need to remove symlinks and directories. We'll repeatedly
+ #remove dead symlinks, then directories until we stop making progress.
+ #This is how we'll clean up directories containing symlinks pointing to
+ #directories that are now empty. These cases will require several
+ #iterations through our two-stage symlink/directory cleaning loop.
+
+ #main symlink and directory removal loop:
+
+ #progress -- are we making progress? Initialized to 1 so loop will start
+ progress=1
+ while progress:
+ #let's see if we're able to make progress this iteration...
+ progress=0
+
+ #step 1: remove all the dead symlinks we can...
+
+ pos = 0
+ while pos<len(mysyms):
+ obj=mysyms[pos]
+ if os.path.exists(obj):
+ pos += 1
+ else:
+ #we have a dead symlink; remove it from our list, then from existence
+ del mysyms[pos]
+ #we've made progress!
+ progress = 1
+ try:
+ os.unlink(obj)
+ print "<<< ","sym",obj
+ except (OSError,IOError),e:
+ print "!!! ","sym",obj
+ #immutable?
+ pass
+
+ #step 2: remove all the empty directories we can...
+
+ pos = 0
+ while pos<len(mydirs):
+ obj=mydirs[pos]
+ objld=listdir(obj)
+
+ if objld == None:
+ print "mydirs["+str(pos)+"]",mydirs[pos]
+ print "obj",obj
+ print "objld",objld
+ # the directory no longer exists (or listdir failed), continue
+ pos += 1
+ continue
+
+ if len(objld)>0:
+ #we won't remove this directory (yet), continue
+ pos += 1
+ continue
+ elif (objld != None):
+ #zappo time
+ del mydirs[pos]
+ #we've made progress!
+ progress = 1
+ try:
+ os.rmdir(obj)
+ print "<<< ","dir",obj
+ except (OSError,IOError),e:
+ #immutable?
+ pass
+ #else:
+ # print "--- !empty","dir", obj
+ # continue
+
+ #step 3: if we've made progress, we'll give this another go...
+
+ #step 4: otherwise, we'll print out the remaining stuff that we didn't unmerge (and rightly so!)
+
+ #directories that aren't empty:
+ for x in mydirs:
+ print "--- !empty dir", x
+
+ #symlinks whose target still exists:
+ for x in mysyms:
+ print "--- !targe sym", x
+
+ #step 5: well, removal of package objects is complete, now for package *meta*-objects....
+
+ #remove self from vartree database so that our own virtual gets zapped if we're the last node
+ db[self.myroot]["vartree"].zap(self.mycpv)
+
+ # New code to remove stuff from the world and virtuals files when unmerged.
+ if trimworld:
+ worldlist=grabfile(self.myroot+WORLD_FILE)
+ mykey=cpv_getkey(self.mycpv)
+ newworldlist=[]
+ for x in worldlist:
+ if dep_getkey(x)==mykey:
+ matches=db[self.myroot]["vartree"].dbapi.match(x,use_cache=0)
+ if not matches:
+ #zap our world entry
+ pass
+ elif (len(matches)==1) and (matches[0]==self.mycpv):
+ #zap our world entry
+ pass
+ else:
+ #others are around; keep it.
+ newworldlist.append(x)
+ else:
+ #this doesn't match the package we're unmerging; keep it.
+ newworldlist.append(x)
+
+ # if the base dir doesn't exist, create it.
+ # (spanky noticed bug)
+ # XXX: dumb question, but abstracting the root uid might be wise/useful for
+ # 2nd pkg manager installation setups.
+ if not os.path.exists(os.path.dirname(self.myroot+WORLD_FILE)):
+ pdir = os.path.dirname(self.myroot + WORLD_FILE)
+ os.makedirs(pdir, mode=0755)
+ os.chown(pdir, 0, portage_gid)
+ os.chmod(pdir, 02770)
+
+ myworld=open(self.myroot+WORLD_FILE,"w")
+ for x in newworldlist:
+ myworld.write(x+"\n")
+ myworld.close()
+
+ #do original postrm
+ if myebuildpath and os.path.exists(myebuildpath):
+ # XXX: This should be the old config, not the current one.
+ # XXX: Use vardbapi to load up env vars.
+ a=doebuild(myebuildpath,"postrm",self.myroot,self.settings,use_cache=0,tree="vartree")
+ # XXX: Decide how to handle failures here.
+ if a != 0:
+ writemsg("!!! FAILED postrm: "+str(a)+"\n")
+ sys.exit(123)
+
+ self.unlockdb()
+
+ def isowner(self,filename,destroot):
+ """ check if filename is a new file or belongs to this package
+ (for this or a previous version)"""
+ destfile = os.path.normpath(destroot+"/"+filename)
+ if not os.path.exists(destfile):
+ return True
+ if self.getcontents() and filename in self.getcontents().keys():
+ return True
+
+ return False
+
+ def treewalk(self,srcroot,destroot,inforoot,myebuild,cleanup=0):
+ global db
+ # srcroot = ${D};
+ # destroot = where to merge, ie. ${ROOT},
+ # inforoot = root of db entry,
+ # secondhand = list of symlinks that have been skipped due to
+ # their target not existing (will merge later),
+
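+ # Typical entry point is merge() below, e.g. (hypothetical objects, as in
+ # pkgmerge() at the bottom of this file):
+ #
+ #   mylink = dblink("sys-apps", "foo-1.0", "/", settings)
+ #   mylink.merge(pkgloc, infloc, "/", myebuild)
+ #
+ # merge() maps mergeroot -> srcroot and myroot -> destroot for treewalk().
+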
+ if not os.path.exists(self.dbcatdir):
+ os.makedirs(self.dbcatdir)
+
+ # This blocks until we can get the dirs to ourselves.
+ self.lockdb()
+
+ otherversions=[]
+ for v in db[self.myroot]["vartree"].dbapi.cp_list(self.mysplit[0]):
+ otherversions.append(v.split("/")[1])
+
+ # check for package collisions
+ if "collision-protect" in features:
+ myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
+
+ # the linkcheck only works if we are in srcroot
+ mycwd = os.getcwd()
+ os.chdir(srcroot)
+ mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
+
+ stopmerge=False
+ starttime=time.time()
+ i=0
+
+ otherpkg=[]
+ mypkglist=[]
+
+ if self.pkg in otherversions:
+ otherversions.remove(self.pkg) # we already checked this package
+
+ for v in otherversions:
+ # should we check for same SLOT here ?
+ mypkglist.append(dblink(self.cat,v,destroot,self.settings))
+
+ print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
+ for f in myfilelist:
+ nocheck = False
+ # listdir isn't intelligent enough to exclude symlinked dirs,
+ # so we have to do it ourselves
+ for s in mysymlinks:
+ # the length comparison makes sure that the symlink itself is checked
+ if f[:len(s)] == s and len(f) > len(s):
+ nocheck = True
+ if nocheck:
+ continue
+ i=i+1
+ if i % 1000 == 0:
+ print str(i)+" files checked ..."
+ if f[0] != "/":
+ f="/"+f
+ isowned = False
+ for ver in [self]+mypkglist:
+ if (ver.isowner(f, destroot) or ver.isprotected(f)):
+ isowned = True
+ break
+ if not isowned:
+ print "existing file "+f+" is not owned by this package"
+ stopmerge=True
+ print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
+ if stopmerge:
+ print red("*")+" This package is blocked because it wants to overwrite"
+ print red("*")+" files belonging to other packages (see messages above)."
+ print red("*")+" If you have no clue what this is all about report it "
+ print red("*")+" as a bug for this package on http://bugs.gentoo.org"
+ print
+ print red("package "+self.cat+"/"+self.pkg+" NOT merged")
+ print
+ # Why is the package already merged here db-wise? Shouldn't be the case.
+ # Only unmerge if it is a new package and has no contents.
+ if not self.getcontents():
+ self.unmerge()
+ self.delete()
+ self.unlockdb()
+ sys.exit(1)
+ try:
+ os.chdir(mycwd)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+
+
+ # get old contents info for later unmerging
+ oldcontents = self.getcontents()
+
+ self.dbdir = self.dbtmpdir
+ self.delete()
+ if not os.path.exists(self.dbtmpdir):
+ os.makedirs(self.dbtmpdir)
+
+ print ">>> Merging",self.mycpv,"to",destroot
+
+ # run preinst script
+ if myebuild:
+ # if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
+ # (if any).
+ a=doebuild(myebuild,"preinst",root,self.settings,cleanup=cleanup,use_cache=0)
+ else:
+ a=doebuild(inforoot+"/"+self.pkg+".ebuild","preinst",root,self.settings,cleanup=cleanup,use_cache=0)
+
+ # XXX: Decide how to handle failures here.
+ if a != 0:
+ writemsg("!!! FAILED preinst: "+str(a)+"\n")
+ sys.exit(123)
+
+ # copy "info" files (like SLOT, CFLAGS, etc.) into the database
+ for x in listdir(inforoot):
+ self.copyfile(inforoot+"/"+x)
+
+ # get current counter value (counter_tick also takes care of incrementing it)
+ # XXX Need to make this destroot, but it needs to be initialized first. XXX
+ # XXX bis: leads to some invalidentry() call through cp_all().
+ counter = db["/"]["vartree"].dbapi.counter_tick(self.myroot,mycpv=self.mycpv)
+ # write local package counter for recording
+ lcfile = open(self.dbtmpdir+"/COUNTER","w")
+ lcfile.write(str(counter))
+ lcfile.close()
+
+ # open CONTENTS file (possibly overwriting old one) for recording
+ outfile=open(self.dbtmpdir+"/CONTENTS","w")
+
+ self.updateprotect()
+
+ #if we have a file containing previously-merged config file md5sums, grab it.
+ if os.path.exists(destroot+CONFIG_MEMORY_FILE):
+ cfgfiledict=grabdict(destroot+CONFIG_MEMORY_FILE)
+ else:
+ cfgfiledict={}
+ if self.settings.has_key("NOCONFMEM"):
+ cfgfiledict["IGNORE"]=1
+ else:
+ cfgfiledict["IGNORE"]=0
+
+ # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
+ mymtime = long(time.time())
+ prevmask = os.umask(0)
+ secondhand = []
+
+ # we do a first merge; this will recurse through all files in our srcroot but also build up a
+ # "second hand" of symlinks to merge later
+ if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
+ return 1
+
+ # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
+ # broken symlinks. We'll merge them too.
+ lastlen=0
+ while len(secondhand) and len(secondhand)!=lastlen:
+ # clear the thirdhand. Anything from our second hand that
+ # couldn't get merged will be added to thirdhand.
+
+ thirdhand=[]
+ self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
+
+ #swap hands
+ lastlen=len(secondhand)
+
+ # our thirdhand now becomes our secondhand. It's ok to throw
+ # away secondhand since thirdhand contains all the stuff that
+ # couldn't be merged.
+ secondhand = thirdhand
+
+ if len(secondhand):
+ # force merge of remaining symlinks (broken or circular; oh well)
+ self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
+
+ #restore umask
+ os.umask(prevmask)
+
+ #if we opened it, close it
+ outfile.flush()
+ outfile.close()
+
+ if (oldcontents):
+ print ">>> Safely unmerging already-installed instance..."
+ self.dbdir = self.dbpkgdir
+ self.unmerge(oldcontents,trimworld=0)
+ self.dbdir = self.dbtmpdir
+ print ">>> original instance of package unmerged safely."
+
+ # We hold both directory locks.
+ self.dbdir = self.dbpkgdir
+ self.delete()
+ movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+
+ self.unlockdb()
+
+ #write out our collection of md5sums
+ if cfgfiledict.has_key("IGNORE"):
+ del cfgfiledict["IGNORE"]
+
+ # XXXX: HACK! PathSpec is very necessary here.
+ if not os.path.exists(destroot+PRIVATE_PATH):
+ os.makedirs(destroot+PRIVATE_PATH)
+ os.chown(destroot+PRIVATE_PATH,os.getuid(),portage_gid)
+ os.chmod(destroot+PRIVATE_PATH,02770)
+ dirlist = prefix_array(listdir(destroot+PRIVATE_PATH),destroot+PRIVATE_PATH+"/")
+ while dirlist:
+ dirlist.sort()
+ dirlist.reverse() # Gets them in file-before basedir order
+ x = dirlist[0]
+ if os.path.isdir(x):
+ dirlist += prefix_array(listdir(x),x+"/")
+ continue
+ os.unlink(destroot+PRIVATE_PATH+"/"+x)
+
+ mylock = portage_locks.lockfile(destroot+CONFIG_MEMORY_FILE)
+ writedict(cfgfiledict,destroot+CONFIG_MEMORY_FILE)
+ portage_locks.unlockfile(mylock)
+
+ #do postinst script
+ if myebuild:
+ # if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
+ # (if any).
+ a=doebuild(myebuild,"postinst",root,self.settings,use_cache=0)
+ else:
+ a=doebuild(inforoot+"/"+self.pkg+".ebuild","postinst",root,self.settings,use_cache=0)
+
+ # XXX: Decide how to handle failures here.
+ if a != 0:
+ writemsg("!!! FAILED postinst: "+str(a)+"\n")
+ sys.exit(123)
+
+ downgrade = False
+ for v in otherversions:
+ if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
+ downgrade = True
+
+ #update environment settings, library paths. DO NOT change symlinks.
+ env_update(makelinks=(not downgrade))
+ #dircache may break autoclean because it remembers the -MERGING-pkg file
+ global dircache
+ if dircache.has_key(self.dbcatdir):
+ del dircache[self.dbcatdir]
+ print ">>>",self.mycpv,"merged."
+ return 0
+
+ def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
+ srcroot=os.path.normpath("///"+srcroot)+"/"
+ destroot=os.path.normpath("///"+destroot)+"/"
+ # this is supposed to merge a list of files. There will be 2 forms of argument passing.
+ if type(stufftomerge)==types.StringType:
+ #A directory is specified. Figure out protection paths, listdir() it and process it.
+ mergelist=listdir(srcroot+stufftomerge)
+ offset=stufftomerge
+ # We need mydest defined up here to calc. protection paths. This is now done once per
+ # directory rather than once per file merge. This should really help merge performance.
+ # Trailing / ensures that protects/masks with trailing /'s match.
+ mytruncpath="/"+offset+"/"
+ myppath=self.isprotected(mytruncpath)
+ else:
+ mergelist=stufftomerge
+ offset=""
+ for x in mergelist:
+ mysrc=os.path.normpath("///"+srcroot+offset+x)
+ mydest=os.path.normpath("///"+destroot+offset+x)
+ # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
+ myrealdest="/"+offset+x
+ # stat file once, test using S_* macros many times (faster that way)
+ try:
+ mystat=os.lstat(mysrc)
+ except SystemExit, e:
+ raise
+ except OSError, e:
+ writemsg("\n")
+ writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
+ writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
+ writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
+ writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
+ writemsg(red("!!! File: ")+str(mysrc)+"\n")
+ writemsg(red("!!! Error: ")+str(e)+"\n")
+ sys.exit(1)
+ except Exception, e:
+ writemsg("\n")
+ writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
+ writemsg(red("!!! A stat call returned the following error for the following file:"))
+ writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
+ writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
+ writemsg( "!!! File: "+str(mysrc)+"\n")
+ writemsg( "!!! Error: "+str(e)+"\n")
+ sys.exit(1)
+
+
+ mymode=mystat[stat.ST_MODE]
+ # handy variables; mydest is the target object on the live filesystems;
+ # mysrc is the source object in the temporary install dir
+ try:
+ mydmode=os.lstat(mydest)[stat.ST_MODE]
+ except SystemExit, e:
+ raise
+ except:
+ #dest file doesn't exist
+ mydmode=None
+
+ if stat.S_ISLNK(mymode):
+ # we are merging a symbolic link
+ myabsto=abssymlink(mysrc)
+ if myabsto[0:len(srcroot)]==srcroot:
+ myabsto=myabsto[len(srcroot):]
+ if myabsto[0]!="/":
+ myabsto="/"+myabsto
+ myto=os.readlink(mysrc)
+ if self.settings and self.settings["D"]:
+ if myto.find(self.settings["D"])==0:
+ myto=myto[len(self.settings["D"]):]
+ # myrealto contains the path of the real file to which this symlink points.
+ # we can simply test for existence of this file to see if the target has been merged yet
+ myrealto=os.path.normpath(os.path.join(destroot,myabsto))
+ if mydmode!=None:
+ #destination exists
+ if not stat.S_ISLNK(mydmode):
+ if stat.S_ISDIR(mydmode):
+ # directory in the way: we can't merge a symlink over a directory
+ # we won't merge this, continue with next file...
+ continue
+ if self.isprotected(mydest):
+ # Use md5 of the target in ${D} if it exists...
+ if os.path.exists(os.path.normpath(srcroot+myabsto)):
+ try:
+ mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(srcroot+myabsto))
+ except IOError:
+ print "========================================"
+ print "mysrc",mysrc
+ print "mymode",mymode
+ print "myabsto",myabsto
+ print "myto",myto
+ print "myrealto",myrealto
+ print "mydest",mydest
+ print "mydmode",mydmode
+ print "========================================"
+ print "Please file the above in bug #71787"
+ sys.exit(1)
+ else:
+ mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(myabsto))
+
+ # if secondhand==None it means we're operating in "force" mode and should not create a second hand.
+ if (secondhand!=None) and (not os.path.exists(myrealto)):
+ # either the target directory doesn't exist yet or the target file doesn't exist -- or
+ # the target is a broken symlink. We will add this file to our "second hand" and merge
+ # it later.
+ secondhand.append(mysrc[len(srcroot):])
+ continue
+ # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
+ mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
+ if mymtime!=None:
+ print ">>>",mydest,"->",myto
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+ else:
+ print "!!! Failed to move file."
+ print "!!!",mydest,"->",myto
+ sys.exit(1)
+ elif stat.S_ISDIR(mymode):
+ # we are merging a directory
+ if mydmode!=None:
+ # destination exists
+
+ if bsd_chflags:
+ # Save then clear flags on dest.
+ dflags=bsd_chflags.lgetflags(mydest)
+ if(bsd_chflags.lchflags(mydest, 0)<0):
+ writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n")
+
+ if not os.access(mydest, os.W_OK):
+ pkgstuff = pkgsplit(self.pkg)
+ writemsg("\n!!! Cannot write to '"+mydest+"'.\n")
+ writemsg("!!! Please check permissions and directories for broken symlinks.\n")
+ writemsg("!!! You may start the merge process again by using ebuild:\n")
+ writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
+ writemsg("!!! And finish by running this: env-update\n\n")
+ return 1
+
+ if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
+ # a symlink to an existing directory will work for us; keep it:
+ print "---",mydest+"/"
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ else:
+ # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
+ if movefile(mydest,mydest+".backup", mysettings=self.settings) == None:
+ sys.exit(1)
+ print "bak",mydest,mydest+".backup"
+ #now create our directory
+ if selinux_enabled:
+ sid = selinux.get_sid(mysrc)
+ selinux.secure_mkdir(mydest,sid)
+ else:
+ os.mkdir(mydest)
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ os.chmod(mydest,mystat[0])
+ lchown(mydest,mystat[4],mystat[5])
+ print ">>>",mydest+"/"
+ else:
+ #destination doesn't exist
+ if selinux_enabled:
+ sid = selinux.get_sid(mysrc)
+ selinux.secure_mkdir(mydest,sid)
+ else:
+ os.mkdir(mydest)
+ os.chmod(mydest,mystat[0])
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, bsd_chflags.lgetflags(mysrc))
+ lchown(mydest,mystat[4],mystat[5])
+ print ">>>",mydest+"/"
+ outfile.write("dir "+myrealdest+"\n")
+ # recurse and merge this directory
+ if self.mergeme(srcroot,destroot,outfile,secondhand,offset+x+"/",cfgfiledict,thismtime):
+ return 1
+ elif stat.S_ISREG(mymode):
+ # we are merging a regular file
+ mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
+ # calculate config file protection stuff
+ mydestdir=os.path.dirname(mydest)
+ moveme=1
+ zing="!!!"
+ if mydmode!=None:
+ # destination file exists
+ if stat.S_ISDIR(mydmode):
+ # install of destination is blocked by an existing directory with the same name
+ moveme=0
+ print "!!!",mydest
+ elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
+ cfgprot=0
+ # install of destination is blocked by an existing regular file,
+ # or by a symlink to an existing regular file;
+ # now, config file management may come into play.
+ # we only need to tweak mydest if cfg file management is in play.
+ if myppath:
+ # we have a protection path; enable config file management.
+ destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
+ cycled=0
+ if cfgfiledict.has_key(myrealdest):
+ if destmd5 in cfgfiledict[myrealdest]:
+ #cycle
+ print "cycle"
+ del cfgfiledict[myrealdest]
+ cycled=1
+ if mymd5==destmd5:
+ #file already in place; simply update mtimes of destination
+ os.utime(mydest,(thismtime,thismtime))
+ zing="---"
+ moveme=0
+ elif cycled:
+ #mymd5!=destmd5 and we've cycled; move mysrc into place as a ._cfg file
+ moveme=1
+ cfgfiledict[myrealdest]=[mymd5]
+ cfgprot=1
+ elif cfgfiledict.has_key(myrealdest) and (mymd5 in cfgfiledict[myrealdest]):
+ #mymd5!=destmd5, we haven't cycled, and the file we're merging has already been merged previously
+ zing="-o-"
+ moveme=cfgfiledict["IGNORE"]
+ cfgprot=cfgfiledict["IGNORE"]
+ else:
+ #mymd5!=destmd5, we haven't cycled, and the file we're merging hasn't been merged before
+ moveme=1
+ cfgprot=1
+ if not cfgfiledict.has_key(myrealdest):
+ cfgfiledict[myrealdest]=[]
+ if mymd5 not in cfgfiledict[myrealdest]:
+ cfgfiledict[myrealdest].append(mymd5)
+ #don't record more than 16 md5sums
+ if len(cfgfiledict[myrealdest])>16:
+ del cfgfiledict[myrealdest][0]
+
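+ # cfgfiledict sketch (hypothetical entry): it maps each ROOT-relative
+ # config path to up to 16 md5sums of versions merged before, e.g.
+ #   {"/etc/foo.conf": ["d41d8cd98f00b204e9800998ecf8427e"], "IGNORE": 0}
+ # so re-merging an md5 we have already seen is skipped ("-o-") instead
+ # of spawning yet another ._cfg* copy.
+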
+ if cfgprot:
+ mydest = new_protect_filename(myrealdest, newmd5=mymd5)
+
+ # whether config protection or not, we merge the new file the
+ # same way. Unless moveme=0 (blocking directory)
+ if moveme:
+ mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
+ if mymtime == None:
+ sys.exit(1)
+ zing=">>>"
+ else:
+ mymtime=thismtime
+ # We need to touch the destination so that on --update the
+ # old package won't yank the file with it. (non-cfgprot related)
+ os.utime(myrealdest,(thismtime,thismtime))
+ zing="---"
+ if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
+
+ # XXX kludge, bug #58848; can be killed when portage stops relying on
+ # md5+mtime, and uses refcounts
+ # alright, we've fooled w/ mtime on the file; this pisses off static archives
+ # basically internal mtime != file's mtime, so the linker (falsely) thinks
+ # the archive is stale, and needs to have its TOC rebuilt.
+
+ myf=open(myrealdest,"r+")
+
+ # ar mtime field is digits padded with spaces, 12 bytes.
+ lms=str(thismtime+5).ljust(12)
+ myf.seek(0)
+ magic=myf.read(8)
+ if magic != "!<arch>\n":
+ # not an archive (dolib.a from portage.py makes it here, for example)
+ myf.close()
+ else:
+ st=os.stat(myrealdest)
+ while myf.tell() < st.st_size - 12:
+ # skip object name
+ myf.seek(16,1)
+
+ # update mtime
+ myf.write(lms)
+
+ # skip uid/gid/mperm
+ myf.seek(20,1)
+
+ # read the archive member's size
+ x=long(myf.read(10))
+
+ # skip the trailing newlines, and add the potential
+ # extra padding byte if it's not an even size
+ myf.seek(x + 2 + (x % 2),1)
+
+ # and now we're at the end. yay.
+ myf.close()
+ mymd5=portage_checksum.perform_md5(myrealdest,calc_prelink=1)
+ os.utime(myrealdest,(thismtime,thismtime))
+
+ if mymtime!=None:
+ zing=">>>"
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+ print zing,mydest
+ else:
+ # we are merging a fifo or device node
+ zing="!!!"
+ if mydmode==None:
+ # destination doesn't exist
+ if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
+ zing=">>>"
+ if stat.S_ISFIFO(mymode):
+ # we don't record device nodes in CONTENTS,
+ # although we do merge them.
+ outfile.write("fif "+myrealdest+"\n")
+ else:
+ sys.exit(1)
+ print zing+" "+mydest
+
+ def merge(self,mergeroot,inforoot,myroot,myebuild=None,cleanup=0):
+ return self.treewalk(mergeroot,myroot,inforoot,myebuild,cleanup=cleanup)
+
+ def getstring(self,name):
+ "returns contents of a file with whitespace converted to spaces"
+ if not os.path.exists(self.dbdir+"/"+name):
+ return ""
+ myfile=open(self.dbdir+"/"+name,"r")
+ mydata=string.split(myfile.read())
+ myfile.close()
+ return string.join(mydata," ")
+
+ def copyfile(self,fname):
+ shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
+
+ def getfile(self,fname):
+ if not os.path.exists(self.dbdir+"/"+fname):
+ return ""
+ myfile=open(self.dbdir+"/"+fname,"r")
+ mydata=myfile.read()
+ myfile.close()
+ return mydata
+
+ def setfile(self,fname,data):
+ myfile=open(self.dbdir+"/"+fname,"w")
+ myfile.write(data)
+ myfile.close()
+
+ def getelements(self,ename):
+ if not os.path.exists(self.dbdir+"/"+ename):
+ return []
+ myelement=open(self.dbdir+"/"+ename,"r")
+ mylines=myelement.readlines()
+ myreturn=[]
+ for x in mylines:
+ for y in string.split(x[:-1]):
+ myreturn.append(y)
+ myelement.close()
+ return myreturn
+
+ def setelements(self,mylist,ename):
+ myelement=open(self.dbdir+"/"+ename,"w")
+ for x in mylist:
+ myelement.write(x+"\n")
+ myelement.close()
+
+ def isregular(self):
+ "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
+ return os.path.exists(self.dbdir+"/CATEGORY")
+
+def cleanup_pkgmerge(mypkg,origdir):
+ shutil.rmtree(settings["PORTAGE_TMPDIR"]+"/portage-pkg/"+mypkg)
+ if os.path.exists(settings["PORTAGE_TMPDIR"]+"/portage/"+mypkg+"/temp/environment"):
+ os.unlink(settings["PORTAGE_TMPDIR"]+"/portage/"+mypkg+"/temp/environment")
+ os.chdir(origdir)
+
+def pkgmerge(mytbz2,myroot,mysettings):
+ """will merge a .tbz2 file, returning a list of runtime dependencies
+ that must be satisfied, or None if there was a merge error. This
+ code assumes the package exists."""
+ if mytbz2[-5:]!=".tbz2":
+ print "!!! Not a .tbz2 file"
+ return None
+ mypkg=os.path.basename(mytbz2)[:-5]
+ xptbz2=xpak.tbz2(mytbz2)
+ pkginfo={}
+ mycat=xptbz2.getfile("CATEGORY")
+ if not mycat:
+ print "!!! CATEGORY info missing from info chunk, aborting..."
+ return None
+ mycat=mycat.strip()
+ mycatpkg=mycat+"/"+mypkg
+ tmploc=mysettings["PORTAGE_TMPDIR"]+"/portage-pkg/"
+ pkgloc=tmploc+"/"+mypkg+"/bin/"
+ infloc=tmploc+"/"+mypkg+"/inf/"
+ myebuild=tmploc+"/"+mypkg+"/inf/"+os.path.basename(mytbz2)[:-4]+"ebuild"
+ if os.path.exists(tmploc+"/"+mypkg):
+ shutil.rmtree(tmploc+"/"+mypkg,1)
+ os.makedirs(pkgloc)
+ os.makedirs(infloc)
+ print ">>> extracting info"
+ xptbz2.unpackinfo(infloc)
+ # run pkg_setup early, so we can bail out early
+ # (before extracting binaries) if there's a problem
+ origdir=getcwd()
+ os.chdir(pkgloc)
+
+ mysettings.configdict["pkg"]["CATEGORY"] = mycat;
+ a=doebuild(myebuild,"setup",myroot,mysettings,tree="bintree")
+ print ">>> extracting",mypkg
+ notok=spawn("bzip2 -dqc -- '"+mytbz2+"' | tar xpf -",mysettings,free=1)
+ if notok:
+ print "!!! Error extracting",mytbz2
+ cleanup_pkgmerge(mypkg,origdir)
+ return None
+
+ # the merge takes care of pre/postinst and old instance
+ # auto-unmerge, virtual/provides updates, etc.
+ mysettings.load_infodir(infloc)
+ mylink=dblink(mycat,mypkg,myroot,mysettings)
+ mylink.merge(pkgloc,infloc,myroot,myebuild,cleanup=1)
+
+ if not os.path.exists(infloc+"/RDEPEND"):
+ returnme=""
+ else:
+ #get runtime dependencies
+ a=open(infloc+"/RDEPEND","r")
+ returnme=string.join(string.split(a.read())," ")
+ a.close()
+ cleanup_pkgmerge(mypkg,origdir)
+ return returnme
+
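+ # pkgmerge() sketch (hypothetical path):
+ #
+ #   rdeps = pkgmerge("/usr/portage/packages/All/foo-1.0.tbz2", "/", settings)
+ #
+ # returns the package's RDEPEND string (possibly ""), or None on a merge
+ # error such as a missing CATEGORY chunk.
+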
+
+if os.environ.has_key("ROOT"):
+ root=os.environ["ROOT"]
+ if not len(root):
+ root="/"
+ elif root[-1]!="/":
+ root=root+"/"
+else:
+ root="/"
+if root != "/":
+ if not os.path.exists(root[:-1]):
+ writemsg("!!! Error: ROOT "+root+" does not exist. Please correct this.\n")
+ writemsg("!!! Exiting.\n\n")
+ sys.exit(1)
+ elif not os.path.isdir(root[:-1]):
+ writemsg("!!! Error: ROOT "+root[:-1]+" is not a directory. Please correct this.\n")
+ writemsg("!!! Exiting.\n\n")
+ sys.exit(1)
+
+#create tmp and var/tmp if they don't exist; read config
+os.umask(0)
+if not os.path.exists(root+"tmp"):
+ writemsg(">>> "+root+"tmp doesn't exist, creating it...\n")
+ os.mkdir(root+"tmp",01777)
+if not os.path.exists(root+"var/tmp"):
+ writemsg(">>> "+root+"var/tmp doesn't exist, creating it...\n")
+ try:
+ os.mkdir(root+"var",0755)
+ except (OSError,IOError):
+ pass
+ try:
+ os.mkdir(root+"var/tmp",01777)
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("portage: couldn't create /var/tmp; exiting.\n")
+ sys.exit(1)
+if not os.path.exists(root+"var/lib/portage"):
+ writemsg(">>> "+root+"var/lib/portage doesn't exist, creating it...\n")
+ try:
+ os.mkdir(root+"var",0755)
+ except (OSError,IOError):
+ pass
+ try:
+ os.mkdir(root+"var/lib",0755)
+ except (OSError,IOError):
+ pass
+ try:
+ os.mkdir(root+"var/lib/portage",02750)
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("portage: couldn't create /var/lib/portage; exiting.\n")
+ sys.exit(1)
+
+
+#####################################
+# Deprecation Checks
+
+os.umask(022)
+profiledir=None
+if "PORTAGE_CALLER" in os.environ and os.environ["PORTAGE_CALLER"] == "emerge" and os.path.isdir(PROFILE_PATH):
+ profiledir = PROFILE_PATH
+ if os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
+ deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
+ dcontent = deprecatedfile.readlines()
+ deprecatedfile.close()
+ newprofile = dcontent[0]
+ writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"))
+ writemsg(red("!!! Please upgrade to the following profile if possible:\n"))
+ writemsg(8*" "+green(newprofile)+"\n")
+ if len(dcontent) > 1:
+ writemsg("To upgrade do the following steps:\n")
+ for myline in dcontent[1:]:
+ writemsg(myline)
+ writemsg("\n\n")
+
+if os.path.exists(USER_VIRTUALS_FILE):
+ writemsg(red("\n!!! /etc/portage/virtuals is deprecated in favor of\n"))
+ writemsg(red("!!! /etc/portage/profile/virtuals. Please move it to\n"))
+ writemsg(red("!!! this new location.\n\n"))
+
+#
+#####################################
+
+db={}
+
+# =============================================================================
+# =============================================================================
+# -----------------------------------------------------------------------------
+# We're going to lock the global config to prevent changes, but we need
+# to ensure the global settings are right.
+settings=config(config_profile_path=PROFILE_PATH,config_incrementals=portage_const.INCREMENTALS)
+
+# useful info
+settings["PORTAGE_MASTER_PID"]=str(os.getpid())
+settings.backup_changes("PORTAGE_MASTER_PID")
+# We are disabling user-specific bashrc files.
+settings["BASH_ENV"] = INVALID_ENV_FILE
+settings.backup_changes("BASH_ENV")
+
+# gets virtual package settings
+def getvirtuals(myroot):
+ global settings
+	writemsg("--- DEPRECATED call to getvirtuals\n")
+ return settings.getvirtuals(myroot)
+
+def do_vartree(mysettings):
+ global virts,virts_p
+ virts=mysettings.getvirtuals("/")
+ virts_p={}
+
+ if virts:
+ myvkeys=virts.keys()
+ for x in myvkeys:
+ vkeysplit=x.split("/")
+ if not virts_p.has_key(vkeysplit[1]):
+ virts_p[vkeysplit[1]]=virts[x]
+ db["/"]={"virtuals":virts,"vartree":vartree("/",virts)}
+ if root!="/":
+ virts=mysettings.getvirtuals(root)
+ db[root]={"virtuals":virts,"vartree":vartree(root,virts)}
+ #We need to create the vartree first, then load our settings, and then set up our other trees
+
+usedefaults=settings.use_defs
+
+# XXX: This is a circular fix.
+#do_vartree(settings)
+#settings.loadVirtuals('/')
+do_vartree(settings)
+#settings.loadVirtuals('/')
+
+settings.reset() # XXX: Regenerate use after we get a vartree -- GLOBAL
+
+
+# XXX: Might cause problems with root="/" assumptions
+portdb=portdbapi(settings["PORTDIR"])
+
+settings.lock()
+# -----------------------------------------------------------------------------
+# =============================================================================
+# =============================================================================
+
+
+if 'selinux' in settings["USE"].split(" "):
+ try:
+ import selinux
+ selinux_enabled=1
+ except OSError, e:
+ writemsg(red("!!! SELinux not loaded: ")+str(e)+"\n")
+ selinux_enabled=0
+ except ImportError:
+ writemsg(red("!!! SELinux module not found.")+" Please verify that it was installed.\n")
+ selinux_enabled=0
+else:
+ selinux_enabled=0
+
+cachedirs=[CACHE_PATH]
+if root!="/":
+ cachedirs.append(root+CACHE_PATH)
+if not os.environ.has_key("SANDBOX_ACTIVE"):
+ for cachedir in cachedirs:
+ if not os.path.exists(cachedir):
+ os.makedirs(cachedir,0755)
+ writemsg(">>> "+cachedir+" doesn't exist, creating it...\n")
+ if not os.path.exists(cachedir+"/dep"):
+			os.makedirs(cachedir+"/dep",02755)
+ writemsg(">>> "+cachedir+"/dep doesn't exist, creating it...\n")
+ try:
+ os.chown(cachedir,uid,portage_gid)
+ os.chmod(cachedir,0775)
+ except OSError:
+ pass
+ try:
+ mystat=os.lstat(cachedir+"/dep")
+ os.chown(cachedir+"/dep",uid,portage_gid)
+ os.chmod(cachedir+"/dep",02775)
+ if mystat[stat.ST_GID]!=portage_gid:
+ spawn("chown -R "+str(uid)+":"+str(portage_gid)+" "+cachedir+"/dep",settings,free=1)
+ spawn("chmod -R u+rw,g+rw "+cachedir+"/dep",settings,free=1)
+ except OSError:
+ pass
+
+def flushmtimedb(record):
+ if mtimedb:
+ if record in mtimedb.keys():
+ del mtimedb[record]
+ #print "mtimedb["+record+"] is cleared."
+ else:
+ writemsg("Invalid or unset record '"+record+"' in mtimedb.\n")
+
+#grab mtimes for eclasses and upgrades
+mtimedb={}
+mtimedbkeys=[
+"updates", "info",
+"version", "starttime",
+"resume", "ldpath"
+]
+mtimedbfile=root+"var/cache/edb/mtimedb"
+try:
+ mypickle=cPickle.Unpickler(open(mtimedbfile))
+ mypickle.find_global=None
+ mtimedb=mypickle.load()
+ if mtimedb.has_key("old"):
+ mtimedb["updates"]=mtimedb["old"]
+ del mtimedb["old"]
+ if mtimedb.has_key("cur"):
+ del mtimedb["cur"]
+except SystemExit, e:
+ raise
+except:
+ #print "!!!",e
+ mtimedb={"updates":{},"version":"","starttime":0}
+
+for x in mtimedb.keys():
+ if x not in mtimedbkeys:
+ writemsg("Deleting invalid mtimedb key: "+str(x)+"\n")
+ del mtimedb[x]
+
+#,"porttree":portagetree(root,virts),"bintree":binarytree(root,virts)}
+features=settings["FEATURES"].split()
+
+do_upgrade_packagesmessage=0
+def do_upgrade(mykey):
+ global do_upgrade_packagesmessage
+ writemsg("\n\n")
+ writemsg(green("Performing Global Updates: ")+bold(mykey)+"\n")
+ writemsg("(Could take a couple of minutes if you have a lot of binary packages.)\n")
+ writemsg(" "+bold(".")+"='update pass' "+bold("*")+"='binary update' "+bold("@")+"='/var/db move'\n"+" "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
+ processed=1
+ #remove stale virtual entries (mappings for packages that no longer exist)
+
+ update_files={}
+ file_contents={}
+ myxfiles = ["package.mask","package.unmask","package.keywords","package.use"]
+ myxfiles = myxfiles + prefix_array(myxfiles, "profile/")
+ for x in myxfiles:
+ try:
+ myfile = open("/etc/portage/"+x,"r")
+ file_contents[x] = myfile.readlines()
+ myfile.close()
+ except IOError:
+ if file_contents.has_key(x):
+ del file_contents[x]
+ continue
+
+ worldlist=grabfile("/"+WORLD_FILE)
+ myupd=grabfile(mykey)
+ db["/"]["bintree"]=binarytree("/",settings["PKGDIR"],virts)
+ for myline in myupd:
+ mysplit=myline.split()
+ if not len(mysplit):
+ continue
+ if mysplit[0]!="move" and mysplit[0]!="slotmove":
+ writemsg("portage: Update type \""+mysplit[0]+"\" not recognized.\n")
+ processed=0
+ continue
+ if mysplit[0]=="move" and len(mysplit)!=3:
+ writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
+ processed=0
+ continue
+ if mysplit[0]=="slotmove" and len(mysplit)!=4:
+ writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
+ processed=0
+ continue
+ sys.stdout.write(".")
+ sys.stdout.flush()
+
+ if mysplit[0]=="move":
+ db["/"]["vartree"].dbapi.move_ent(mysplit)
+ db["/"]["bintree"].move_ent(mysplit)
+ #update world entries:
+ for x in range(0,len(worldlist)):
+ #update world entries, if any.
+ worldlist[x]=dep_transform(worldlist[x],mysplit[1],mysplit[2])
+
+ #update /etc/portage/packages.*
+ for x in file_contents:
+ for mypos in range(0,len(file_contents[x])):
+ line=file_contents[x][mypos]
+ if line[0]=="#" or string.strip(line)=="":
+ continue
+ key=dep_getkey(line.split()[0])
+ if key==mysplit[1]:
+ file_contents[x][mypos]=string.replace(line,mysplit[1],mysplit[2])
+ update_files[x]=1
+ sys.stdout.write("p")
+ sys.stdout.flush()
+
+ elif mysplit[0]=="slotmove":
+ db["/"]["vartree"].dbapi.move_slot_ent(mysplit)
+ db["/"]["bintree"].move_slot_ent(mysplit,settings["PORTAGE_TMPDIR"]+"/tbz2")
+
+ for x in update_files:
+ mydblink = dblink('','','/',settings)
+ if mydblink.isprotected("/etc/portage/"+x):
+ updating_file=new_protect_filename("/etc/portage/"+x)[0]
+ else:
+ updating_file="/etc/portage/"+x
+ try:
+ myfile=open(updating_file,"w")
+ myfile.writelines(file_contents[x])
+ myfile.close()
+ except IOError:
+ continue
+
+ # We gotta do the brute force updates for these now.
+ if (settings["PORTAGE_CALLER"] in ["fixpackages"]) or \
+ ("fixpackages" in features):
+ db["/"]["bintree"].update_ents(myupd,settings["PORTAGE_TMPDIR"]+"/tbz2")
+ else:
+ do_upgrade_packagesmessage = 1
+
+ if processed:
+ #update our internal mtime since we processed all our directives.
+ mtimedb["updates"][mykey]=os.stat(mykey)[stat.ST_MTIME]
+ myworld=open("/"+WORLD_FILE,"w")
+ for x in worldlist:
+ myworld.write(x+"\n")
+ myworld.close()
+ print ""
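+
+# Illustrative sketch (not part of the original source): files under
+# $PORTDIR/profiles/updates contain one directive per line, in the two
+# forms parsed by do_upgrade() above. Hypothetical examples:
+#
+#   move net-misc/oldname net-misc/newname
+#   slotmove >=x11-libs/foo-3.0 0 3
+#
+# "move" renames a package key in the vdb, binary packages, the world
+# file and /etc/portage/package.*; "slotmove" rewrites the SLOT of the
+# matching installed and binary packages.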
+
+def portageexit():
+ global uid,portage_gid,portdb,db
+ if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
+ # wait child process death
+ try:
+ while True:
+ os.wait()
+ except OSError:
+			#writemsg(">>> All child processes are now dead.")
+ pass
+
+ close_portdbapi_caches()
+
+ if mtimedb:
+ # Store mtimedb
+ mymfn=mtimedbfile
+ try:
+ mtimedb["version"]=VERSION
+ cPickle.dump(mtimedb, open(mymfn,"w"), -1)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+
+ try:
+ os.chown(mymfn,uid,portage_gid)
+ os.chmod(mymfn,0664)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+
+atexit.register(portageexit)
+
+if (secpass==2) and (not os.environ.has_key("SANDBOX_ACTIVE")):
+ if settings["PORTAGE_CALLER"] in ["emerge","fixpackages"]:
+ #only do this if we're root and not running repoman/ebuild digest
+ updpath=os.path.normpath(settings["PORTDIR"]+"///profiles/updates")
+ didupdate=0
+ if not mtimedb.has_key("updates"):
+ mtimedb["updates"]={}
+ try:
+ mylist=listdir(updpath,EmptyOnError=1)
+ # resort the list
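+			# The files are named like "1Q-2005" (quarter, then year). The
+			# two list comprehensions below rewrite each name to "2005-1Q",
+			# sort chronologically, then restore the original form:
+			#   "1Q-2005" -> "2005-1Q" -> (sort) -> "1Q-2005"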
+ mylist=[myfile[3:]+"-"+myfile[:2] for myfile in mylist]
+ mylist.sort()
+ mylist=[myfile[5:]+"-"+myfile[:4] for myfile in mylist]
+ for myfile in mylist:
+ mykey=updpath+"/"+myfile
+ if not os.path.isfile(mykey):
+ continue
+ if (not mtimedb["updates"].has_key(mykey)) or \
+ (mtimedb["updates"][mykey] != os.stat(mykey)[stat.ST_MTIME]) or \
+ (settings["PORTAGE_CALLER"] == "fixpackages"):
+ didupdate=1
+ do_upgrade(mykey)
+ portageexit() # This lets us save state for C-c.
+ except OSError:
+ #directory doesn't exist
+ pass
+ if didupdate:
+ #make sure our internal databases are consistent; recreate our virts and vartree
+ do_vartree(settings)
+ if do_upgrade_packagesmessage and \
+ listdir(settings["PKGDIR"]+"/All/",EmptyOnError=1):
+			writemsg("\n\n\n ** Skipping packages. Run 'fixpackages' or add it to FEATURES to fix the")
+ writemsg("\n tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
+ writemsg("\n")
+
+
+
+
+
+#continue setting up other trees
+db["/"]["porttree"]=portagetree("/",virts)
+db["/"]["bintree"]=binarytree("/",settings["PKGDIR"],virts)
+if root!="/":
+ db[root]["porttree"]=portagetree(root,virts)
+ db[root]["bintree"]=binarytree(root,settings["PKGDIR"],virts)
+thirdpartymirrors=grabdict(settings["PORTDIR"]+"/profiles/thirdpartymirrors")
+
+if not os.path.exists(settings["PORTAGE_TMPDIR"]):
+ writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
+ writemsg("does not exist. Please create this directory or correct your PORTAGE_TMPDIR setting.\n")
+ sys.exit(1)
+if not os.path.isdir(settings["PORTAGE_TMPDIR"]):
+ writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
+ writemsg("is not a directory. Please correct your PORTAGE_TMPDIR setting.\n")
+ sys.exit(1)
+
+# COMPATIBILITY -- This shouldn't be used.
+pkglines = settings.packages
+
+groups=settings["ACCEPT_KEYWORDS"].split()
+archlist=[]
+for myarch in grabfile(settings["PORTDIR"]+"/profiles/arch.list"):
+ archlist += [myarch,"~"+myarch]
+for group in groups:
+ if not archlist:
+ writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
+ break
+ elif (group not in archlist) and group[0]!='-':
+ writemsg("\n"+red("!!! INVALID ACCEPT_KEYWORDS: ")+str(group)+"\n")
+
+# Clear the cache
+dircache={}
+
+if not os.path.islink(PROFILE_PATH) and os.path.exists(settings["PORTDIR"]+"/profiles"):
+ writemsg(red("\a\n\n!!! "+PROFILE_PATH+" is not a symlink and will probably prevent most merges.\n"))
+ writemsg(red("!!! It should point into a profile within %s/profiles/\n" % settings["PORTDIR"]))
+ writemsg(red("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
+ time.sleep(3)
+
+# ============================================================================
+# ============================================================================
+
diff --git a/pym/portage_checksum.py b/pym/portage_checksum.py
new file mode 100644
index 000000000..cefcda6db
--- /dev/null
+++ b/pym/portage_checksum.py
@@ -0,0 +1,134 @@
+# portage_checksum.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_checksum.py,v 1.10.2.2 2005/08/10 05:42:03 ferringb Exp $
+cvs_id_string="$Id: portage_checksum.py,v 1.10.2.2 2005/08/10 05:42:03 ferringb Exp $"[5:-2]
+
+from portage_const import PRIVATE_PATH,PRELINK_BINARY
+import os
+import shutil
+import stat
+import portage_exec
+import portage_util
+import portage_locks
+import portage_exception
+import commands
+import sha
+
+prelink_capable = False
+if os.path.exists(PRELINK_BINARY):
+ results = commands.getstatusoutput(PRELINK_BINARY+" --version > /dev/null 2>&1")
+ if (results[0] >> 8) == 0:
+		prelink_capable = True
+ del results
+
+def perform_md5(x, calc_prelink=0):
+ return perform_checksum(x, md5hash, calc_prelink)[0]
+
+def perform_sha1(x, calc_prelink=0):
+ return perform_checksum(x, sha1hash, calc_prelink)[0]
+
+def perform_all(x, calc_prelink=0):
+ mydict = {}
+ mydict["SHA1"] = perform_sha1(x, calc_prelink)
+ mydict["MD5"] = perform_md5(x, calc_prelink)
+ return mydict
+
+def get_valid_checksum_keys():
+ return ["SHA1", "MD5"]
+
+def verify_all(filename, mydict, calc_prelink=0, strict=0):
+ # Dict relates to single file only.
+ # returns: (passed,reason)
+ file_is_ok = True
+ reason = "Reason unknown"
+ try:
+ if mydict["size"] != os.stat(filename)[stat.ST_SIZE]:
+ return False,"Filesize does not match recorded size"
+ except OSError, e:
+ return False, str(e)
+ for x in mydict.keys():
+ if x == "size":
+ continue
+ elif x == "SHA1":
+ if mydict[x] != perform_sha1(filename, calc_prelink=calc_prelink):
+ if strict:
+					raise portage_exception.DigestException, "Failed to verify '%(file)s' on checksum type '%(type)s'" % {"file":filename, "type":x}
+ else:
+ file_is_ok = False
+ reason = "Failed on %s verification" % (x,)
+ break
+ elif x == "MD5":
+ if mydict[x] != perform_md5(filename, calc_prelink=calc_prelink):
+ if strict:
+					raise portage_exception.DigestException, "Failed to verify '%(file)s' on checksum type '%(type)s'" % {"file":filename, "type":x}
+ else:
+ file_is_ok = False
+ reason = "Failed on %s verification" % (x,)
+ break
+ return file_is_ok,reason
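+
+# Illustrative usage sketch (not part of the original source); the dict
+# mirrors the per-file checksum data recorded in digests:
+#
+#   mydict = {"size": 28947, "MD5": "d41d8cd98f00b204e9800998ecf8427e"}
+#   ok, reason = verify_all("/usr/portage/distfiles/foo.tar.gz", mydict)
+#   if not ok:
+#       print "!!!", reason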
+
+# We _try_ to load this module. If it fails we do the slow fallback.
+try:
+ import fchksum
+
+ def md5hash(filename):
+ return fchksum.fmd5t(filename)
+
+except ImportError:
+ import md5
+ def md5hash(filename):
+ f = open(filename, 'rb')
+ blocksize=32768
+ data = f.read(blocksize)
+ size = 0L
+ sum = md5.new()
+ while data:
+ sum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+ f.close()
+
+ return (sum.hexdigest(),size)
+
+def sha1hash(filename):
+ f = open(filename, 'rb')
+ blocksize=32768
+ data = f.read(blocksize)
+ size = 0L
+ sum = sha.new()
+ while data:
+ sum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+ f.close()
+
+ return (sum.hexdigest(),size)
+
+def perform_checksum(filename, hash_function=md5hash, calc_prelink=0):
+ myfilename = filename[:]
+ prelink_tmpfile = PRIVATE_PATH+"/prelink-checksum.tmp."+str(os.getpid())
+ mylock = None
+
+ if calc_prelink and prelink_capable:
+ mylock = portage_locks.lockfile(prelink_tmpfile, wantnewlockfile=1)
+ # Create non-prelinked temporary file to md5sum.
+ # Raw data is returned on stdout, errors on stderr.
+ # Non-prelinks are just returned.
+ try:
+ shutil.copy2(filename,prelink_tmpfile)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ portage_util.writemsg("!!! Unable to copy file '"+str(filename)+"'.\n")
+ portage_util.writemsg("!!! "+str(e)+"\n")
+ raise
+ portage_exec.spawn(PRELINK_BINARY+" --undo "+prelink_tmpfile,fd_pipes={})
+ myfilename=prelink_tmpfile
+
+ myhash, mysize = hash_function(myfilename)
+
+ if calc_prelink and prelink_capable:
+ os.unlink(prelink_tmpfile)
+ portage_locks.unlockfile(mylock)
+
+ return (myhash,mysize)
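+
+# Illustrative usage sketch (not part of the original source):
+#
+#   md5sum, nbytes  = perform_checksum("/etc/make.conf")            # md5hash default
+#   sha1sum, nbytes = perform_checksum("/etc/make.conf", sha1hash)
+#
+# With calc_prelink=1 and prelink installed, the file is first copied
+# and un-prelinked so the hash matches the original, unmodified ELF.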
diff --git a/pym/portage_const.py b/pym/portage_const.py
new file mode 100644
index 000000000..40ad3cf17
--- /dev/null
+++ b/pym/portage_const.py
@@ -0,0 +1,48 @@
+# portage: Constants
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_const.py,v 1.3.2.3 2005/04/29 04:56:35 jstubbs Exp $
+cvs_id_string="$Id: portage_const.py,v 1.3.2.3 2005/04/29 04:56:35 jstubbs Exp $"[5:-2]
+
+# ===========================================================================
+# START OF CONSTANTS -- START OF CONSTANTS -- START OF CONSTANTS -- START OF
+# ===========================================================================
+
+VDB_PATH = "var/db/pkg"
+PRIVATE_PATH = "/var/lib/portage"
+CACHE_PATH = "/var/cache/edb"
+DEPCACHE_PATH = CACHE_PATH+"/dep"
+
+USER_CONFIG_PATH = "/etc/portage"
+MODULES_FILE_PATH = USER_CONFIG_PATH+"/modules"
+CUSTOM_PROFILE_PATH = USER_CONFIG_PATH+"/profile"
+
+PORTAGE_BASE_PATH = "/usr/lib/portage"
+PORTAGE_BIN_PATH = PORTAGE_BASE_PATH+"/bin"
+PORTAGE_PYM_PATH = PORTAGE_BASE_PATH+"/pym"
+PROFILE_PATH = "/etc/make.profile"
+LOCALE_DATA_PATH = PORTAGE_BASE_PATH+"/locale"
+
+EBUILD_SH_BINARY = PORTAGE_BIN_PATH+"/ebuild.sh"
+SANDBOX_BINARY = "/usr/bin/sandbox"
+BASH_BINARY = "/bin/bash"
+MOVE_BINARY = "/bin/mv"
+PRELINK_BINARY = "/usr/sbin/prelink"
+
+WORLD_FILE = PRIVATE_PATH+"/world"
+MAKE_CONF_FILE = "/etc/make.conf"
+MAKE_DEFAULTS_FILE = PROFILE_PATH + "/make.defaults"
+DEPRECATED_PROFILE_FILE = PROFILE_PATH+"/deprecated"
+USER_VIRTUALS_FILE = USER_CONFIG_PATH+"/virtuals"
+EBUILD_SH_ENV_FILE = USER_CONFIG_PATH+"/bashrc"
+INVALID_ENV_FILE = "/etc/spork/is/not/valid/profile.env"
+CUSTOM_MIRRORS_FILE = USER_CONFIG_PATH+"/mirrors"
+SANDBOX_PIDS_FILE = "/tmp/sandboxpids.tmp"
+CONFIG_MEMORY_FILE = PRIVATE_PATH + "/config"
+
+INCREMENTALS=["USE","FEATURES","ACCEPT_KEYWORDS","ACCEPT_LICENSE","CONFIG_PROTECT_MASK","CONFIG_PROTECT","PRELINK_PATH","PRELINK_PATH_MASK"]
+STICKIES=["KEYWORDS_ACCEPT","USE","CFLAGS","CXXFLAGS","MAKEOPTS","EXTRA_ECONF","EXTRA_EINSTALL","EXTRA_EMAKE"]
+
+# ===========================================================================
+# END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
+# ===========================================================================
diff --git a/pym/portage_contents.py b/pym/portage_contents.py
new file mode 100644
index 000000000..0f0b24204
--- /dev/null
+++ b/pym/portage_contents.py
@@ -0,0 +1,161 @@
+# portage_contents.py -- (Persistent) Contents File Management
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_contents.py,v 1.3.2.1 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: portage_contents.py,v 1.3.2.1 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+import os,string,types,sys,copy
+import portage_exception
+import portage_const
+
+#import gettext
+#gettext_t = gettext.translation('portage.contents', portage_const.LOCALE_DATA_PATH)
+#_ = gettext_t.ugettext
+def _(mystr):
+ return mystr
+
+
+FILES_KEY = "\0FILES\0"
+OWNER_KEY = "\0OWNER\0"
+
+
+def ContentsHandler(filename):
+ infile = open(filename)
+ myfiles = []
+ mydirs = []
+
+ mylines = infile.readlines()
+ infile.close()
+ for line in mylines:
+ if line[-1] == '\n':
+ line = line[:-1]
+ parts = string.split(line)
+
+ mytype = parts[0]
+ mytarget = None
+
+ if mytype in ["dir","dev","fif"]:
+ mypath = string.join(parts[1:])
+ elif mytype == "obj":
+ mypath = string.join(parts[1:-2])
+ elif mytype == "sym":
+ sl = string.join(parts[1:-1])
+ sl = string.split(sl, " -> ")
+
+ mypath = sl[0]
+ mytarget = sl[1]
+		else:
+			print _("Unknown type:"),mytype
+			continue
+
+ if mytype in ["dir"]:
+ mydirs.append(mypath)
+ else:
+ myfiles.append(mypath)
+
+ mydirs.sort()
+ myfiles.sort()
+ return myfiles,mydirs
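+
+# Illustrative sketch (not part of the original source): CONTENTS lines
+# take the forms parsed above, e.g.
+#
+#   dir /usr/bin
+#   obj /usr/bin/foo 3f786850e387550fdab836ed7e6dc881 1079903037
+#   sym /usr/bin/bar -> foo 1079903037
+#
+# for which ContentsHandler() would return
+# (["/usr/bin/bar", "/usr/bin/foo"], ["/usr/bin"]).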
+
+class PathLookupTable:
+ """Creates a temporary lookup table for paths from files."""
+ def __init__(self,dbname):
+ #if not self.validLocation(dbname):
+ # raise portage_exception.InvalidLocation, dbname
+ #self.base = copy.deepcopy(dbname)
+
+ self.files = []
+ self.pathtree = {}
+
+ def addFromFile(self, filename, handler):
+ if type(handler) != types.FunctionType:
+ raise portage_exception.IncorrectParameter, _("Handler of type '%(type)s' not 'function'") % {"type": type(handler)}
+
+ filelist,dirlist = handler(filename)
+ filestat = os.stat(filename)
+
+ if type(filelist) != types.ListType:
+ raise portage_exception.InvalidDataType, _("%(handler)s returned an invalid file list") % {"handler": handler.__name__}
+ if type(dirlist) != types.ListType:
+ raise portage_exception.InvalidDataType, _("%(handler)s returned an invalid directory list") % {"handler": handler.__name__}
+
+ for x in filelist:
+ if not x:
+ continue
+ x = os.path.normpath(x)
+ if len(x) > 1:
+ if x[:2] == "//":
+ x = x[1:]
+ if type(x) != types.StringType:
+ raise portage_exception.InvalidDataType, _("%(handler)s returned an invalid subelement in dataset") % {"handler": handler.__name__}
+ xs = string.split(x, "/")
+ self.addFilePath(xs,filename)
+
+ for x in dirlist:
+ if not x:
+ continue
+ x = os.path.normpath(x)
+ if len(x) > 1:
+ if x[:2] == "//":
+ x = x[1:]
+ if type(x) != types.StringType:
+ raise portage_exception.InvalidDataType, _("%(handler)s returned an invalid subelement in dataset") % {"handler": handler.__name__}
+ xs = string.split(x, "/")
+ self.addDirectoryPath(xs,filename)
+
+ def addDirectoryPath(self,split_path, owner):
+ pt = self.pathtree
+ for x in split_path:
+ if x not in pt.keys():
+ pt[x] = {FILES_KEY:{},OWNER_KEY:[]}
+ if owner not in pt[x][OWNER_KEY]:
+ pt[x][OWNER_KEY].append(owner[:])
+ pt = pt[x]
+ return pt
+
+ def addFilePath(self,split_path, owner):
+ pt = self.addDirectoryPath(split_path[:-1], owner)
+ if split_path[-1] not in pt[FILES_KEY]:
+ pt[FILES_KEY][split_path[-1][:]] = []
+ if owner not in pt[FILES_KEY][split_path[-1][:]]:
+ pt[FILES_KEY][split_path[-1][:]].append(owner[:])
+
+ def whoProvides(self,path):
+ if type(path) != types.StringType:
+ raise portage_exception.InvalidData, _("Path passed is not a string: %(path)s") % {"path": path}
+ x = os.path.normpath(path)
+ if x[0:2] == '//':
+ x = x[1:]
+
+ xs = x.split("/")
+ pt = self.pathtree
+ final_dir = xs.pop(-1)
+ for subpath in xs:
+ if subpath in pt.keys():
+ pt = pt[subpath]
+
+ owners = []
+ if final_dir in pt[FILES_KEY]:
+ for x in pt[FILES_KEY][final_dir]:
+ if x not in owners:
+ owners.append(x[:])
+ if final_dir in pt:
+ for x in pt[final_dir][OWNER_KEY]:
+ if x not in owners:
+ owners.append(x[:])
+
+ return owners
+
+
+
+def test():
+ import os
+ plt = PathLookupTable("spork")
+ for x in os.listdir("/var/db/pkg"):
+ for y in os.listdir("/var/db/pkg/"+x):
+ c_path = "/var/db/pkg/"+x+"/"+y+"/CONTENTS"
+ if os.path.exists(c_path):
+ plt.addFromFile(c_path, ContentsHandler)
+ print "/bin/bash:", plt.whoProvides("/bin/bash")
+ print "/var/spool:", plt.whoProvides("/var/spool")
+	print "/etc/init.d:", plt.whoProvides("/etc/init.d")
+ return plt
diff --git a/pym/portage_data.py b/pym/portage_data.py
new file mode 100644
index 000000000..6854655a5
--- /dev/null
+++ b/pym/portage_data.py
@@ -0,0 +1,85 @@
+# portage_data.py -- Calculated/Discovered Data Values
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_data.py,v 1.5.2.2 2005/02/26 11:22:38 carpaski Exp $
+cvs_id_string="$Id: portage_data.py,v 1.5.2.2 2005/02/26 11:22:38 carpaski Exp $"[5:-2]
+
+import os,pwd,grp,sys
+from portage_util import writemsg
+from output import green,red
+
+ostype=os.uname()[0]
+
+lchown = None
+if ostype=="Linux" or ostype.lower().endswith("gnu"):
+ userland="GNU"
+ os.environ["XARGS"]="xargs -r"
+elif ostype == "Darwin":
+ userland="Darwin"
+ os.environ["XARGS"]="xargs"
+ lchown=os.chown
+elif ostype in ["FreeBSD","OpenBSD"]:
+ userland="BSD"
+ os.environ["XARGS"]="xargs"
+else:
+ writemsg(red("Operating system")+" \""+ostype+"\" "+red("currently unsupported. Exiting.")+"\n")
+ sys.exit(1)
+
+if not lchown:
+ if "lchown" in dir(os):
+ # Included in python-2.3
+ lchown = os.lchown
+ else:
+ import missingos
+ lchown = missingos.lchown
+
+
+
+os.environ["USERLAND"]=userland
+
+#Secpass is set to 2 for root, and to 1 if the user is in the wheel or portage group.
+secpass=0
+
+uid=os.getuid()
+wheelgid=0
+
+if uid==0:
+ secpass=2
+try:
+ wheelgid=grp.getgrnam("wheel")[2]
+ if (not secpass) and (wheelgid in os.getgroups()):
+ secpass=1
+except KeyError:
+ writemsg("portage initialization: your system doesn't have a 'wheel' group.\n")
+ writemsg("Please fix this as it is a normal system requirement. 'wheel' is GID 10\n")
+ writemsg("'emerge baselayout' and an 'etc-update' should remedy this problem.\n")
+ pass
+
+#Discover the uid and gid of the portage user/group
+try:
+ portage_uid=pwd.getpwnam("portage")[2]
+ portage_gid=grp.getgrnam("portage")[2]
+ if (secpass==0):
+ secpass=1
+except KeyError:
+ portage_uid=0
+ portage_gid=wheelgid
+ writemsg("\n")
+ writemsg( red("portage: 'portage' user or group missing. Please update baselayout\n"))
+ writemsg( red(" and merge portage user(250) and group(250) into your passwd\n"))
+ writemsg( red(" and group files. Non-root compilation is disabled until then.\n"))
+ writemsg( " Also note that non-root/wheel users will need to be added to\n")
+ writemsg( " the portage group to do portage commands.\n")
+ writemsg("\n")
+ writemsg( " For the defaults, line 1 goes into passwd, and 2 into group.\n")
+ writemsg(green(" portage:x:250:250:portage:/var/tmp/portage:/bin/false\n"))
+ writemsg(green(" portage::250:portage\n"))
+ writemsg("\n")
+
+if (uid!=0) and (portage_gid not in os.getgroups()):
+ writemsg("\n")
+ writemsg(red("*** You are not in the portage group. You may experience cache problems\n"))
+ writemsg(red("*** due to permissions preventing the creation of the on-disk cache.\n"))
+ writemsg(red("*** Please add this user to the portage group if you wish to use portage.\n"))
+ writemsg("\n")
+
diff --git a/pym/portage_db_anydbm.py b/pym/portage_db_anydbm.py
new file mode 100644
index 000000000..2b0b298c5
--- /dev/null
+++ b/pym/portage_db_anydbm.py
@@ -0,0 +1,64 @@
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/Attic/portage_db_anydbm.py,v 1.11.2.1 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: portage_db_anydbm.py,v 1.11.2.1 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+import anydbm,cPickle,types,os
+
+import portage_db_template
+
+class database(portage_db_template.database):
+ def module_init(self):
+ prevmask=os.umask(0)
+ if not os.path.exists(self.path):
+ current_path="/"
+ for mydir in self.path.split("/"):
+ current_path += "/"+mydir
+ if not os.path.exists(current_path):
+ os.mkdir(current_path)
+
+ self.filename = self.path + "/" + self.category + ".anydbm"
+
+ try:
+ # open it read/write
+ self.db = anydbm.open(self.filename, "c", 0664)
+ except SystemExit, e:
+ raise
+ except:
+ # Create a new db... DB type not supported anymore?
+ self.db = anydbm.open(self.filename, "n", 0664)
+
+ os.umask(prevmask)
+
+ def has_key(self,key):
+ self.check_key(key)
+ if self.db.has_key(key):
+ return 1
+ return 0
+
+ def keys(self):
+ return self.db.keys()
+
+ def get_values(self,key):
+ self.check_key(key)
+ if self.db.has_key(key):
+ myval = cPickle.loads(self.db[key])
+ return myval
+ return None
+
+ def set_values(self,key,val):
+ self.check_key(key)
+ self.db[key] = cPickle.dumps(val,cPickle.HIGHEST_PROTOCOL)
+
+ def del_key(self,key):
+ if self.has_key(key):
+ del self.db[key]
+ return True
+ return False
+
+ def sync(self):
+ self.db.sync()
+
+ def close(self):
+ self.db.close()
+
diff --git a/pym/portage_db_cpickle.py b/pym/portage_db_cpickle.py
new file mode 100644
index 000000000..beaf4b615
--- /dev/null
+++ b/pym/portage_db_cpickle.py
@@ -0,0 +1,79 @@
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/Attic/portage_db_cpickle.py,v 1.9.2.2 2005/04/23 07:26:04 jstubbs Exp $
+cvs_id_string="$Id: portage_db_cpickle.py,v 1.9.2.2 2005/04/23 07:26:04 jstubbs Exp $"[5:-2]
+
+import anydbm,cPickle,types
+from os import chown,access,R_OK,unlink
+import os
+
+import portage_db_template
+
+class database(portage_db_template.database):
+ def module_init(self):
+ self.modified = False
+
+ prevmask=os.umask(0)
+ if not os.path.exists(self.path):
+ os.makedirs(self.path, 02775)
+
+ self.filename = self.path + "/" + self.category + ".cpickle"
+
+ if access(self.filename, R_OK):
+ try:
+ mypickle=cPickle.Unpickler(open(self.filename,"r"))
+ mypickle.find_global=None
+ self.db = mypickle.load()
+ except SystemExit, e:
+ raise
+ except:
+ self.db = {}
+ else:
+ self.db = {}
+
+ os.umask(prevmask)
+
+ def has_key(self,key):
+ self.check_key(key)
+ if self.db.has_key(key):
+ return 1
+ return 0
+
+ def keys(self):
+ return self.db.keys()
+
+ def get_values(self,key):
+ self.check_key(key)
+ if self.db.has_key(key):
+ return self.db[key]
+ return None
+
+ def set_values(self,key,val):
+ self.modified = True
+ self.check_key(key)
+ self.db[key] = val
+
+ def del_key(self,key):
+ if self.has_key(key):
+ del self.db[key]
+ self.modified = True
+ return True
+ return False
+
+ def sync(self):
+ if self.modified:
+ try:
+ if os.path.exists(self.filename):
+ unlink(self.filename)
+ cPickle.dump(self.db, open(self.filename,"w"), -1)
+ os.chown(self.filename,self.uid,self.gid)
+ os.chmod(self.filename, 0664)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+
+ def close(self):
+ self.sync()
+		self.db = None
+
diff --git a/pym/portage_db_flat.py b/pym/portage_db_flat.py
new file mode 100644
index 000000000..b6cb46415
--- /dev/null
+++ b/pym/portage_db_flat.py
@@ -0,0 +1,113 @@
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/Attic/portage_db_flat.py,v 1.13.2.6 2005/04/19 07:14:17 ferringb Exp $
+cvs_id_string="$Id: portage_db_flat.py,v 1.13.2.6 2005/04/19 07:14:17 ferringb Exp $"[5:-2]
+
+import types
+import os
+import stat
+
+import portage_db_template
+
+class database(portage_db_template.database):
+ def module_init(self):
+ self.lastkey = None # Cache
+ self.lastval = None # Cache
+
+ self.fullpath = self.path + "/" + self.category + "/"
+
+ if not os.path.exists(self.fullpath):
+ prevmask=os.umask(0)
+ os.makedirs(self.fullpath, 02775)
+ os.umask(prevmask)
+ try:
+ os.chown(self.fullpath, self.uid, self.gid)
+ os.chmod(self.fullpath, 02775)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+
+ def has_key(self,key):
+ if os.path.exists(self.fullpath+key):
+ return 1
+ return 0
+
+ def keys(self):
+		# XXX: NEED TOOLS SEPARATED
+ # return portage.listdir(self.fullpath,filesonly=1)
+ mykeys = []
+ for x in os.listdir(self.fullpath):
+ if os.path.isfile(self.fullpath+x) and not x.startswith(".update."):
+ mykeys += [x]
+ return mykeys
+
+ def get_values(self,key):
+ if not key:
+ raise KeyError, "key is not set to a valid value"
+
+ try:
+ # give buffering a hint of the pretty much maximal cache size we deal with
+ myf = open(self.fullpath+key, "r", 8192)
+		except (OSError, IOError), oe:
+ # either the file didn't exist, or it was removed under our feet.
+ return None
+
+
+		# nuke the newlines right off the bat.
+ data = myf.read().splitlines()
+ mdict = {}
+
+ # rely on exceptions to note differing line counts.
+ try:
+ for x in range(0, len(self.dbkeys)):
+ mdict[self.dbkeys[x]] = data[x]
+
+			# do this now, rather than earlier - the work would have been
+			# wasted if the key count mismatched
+ mdict["_mtime_"] = os.fstat(myf.fileno()).st_mtime
+
+ except IndexError:
+ myf.close()
+			raise ValueError, "Key count mismatch"
+
+ myf.close()
+ return mdict
+
+ def set_values(self,key,val):
+ if not key:
+ raise KeyError, "No key provided. key:%s val:%s" % (key,val)
+ if not val:
+ raise ValueError, "No value provided. key:%s val:%s" % (key,val)
+
+ # XXX threaded cache updates won't play nice with this.
+	# need a synchronization primitive, or locking (of the fileno, not a separate file)
+ # to correctly handle threading.
+
+ update_fp = self.fullpath + ".update." + str(os.getpid()) + "." + key
+ myf = open(update_fp,"w")
+ myf.writelines( [ val[x] +"\n" for x in self.dbkeys] )
+ myf.close()
+
+ os.chown(update_fp, self.uid, self.gid)
+ os.chmod(update_fp, 0664)
+ os.utime(update_fp, (-1,long(val["_mtime_"])))
+ os.rename(update_fp, self.fullpath+key)
+
+ def del_key(self,key):
+ try:
+ os.unlink(self.fullpath+key)
+ except OSError, oe:
+ # just attempt it without checking, due to the fact that
+ # a cache update could be in progress.
+ self.lastkey = None
+ self.lastval = None
+ return 0
+ return 1
+
+ def sync(self):
+ return
+
+ def close(self):
+ return
+
diff --git a/pym/portage_db_template.py b/pym/portage_db_template.py
new file mode 100644
index 000000000..ec602245f
--- /dev/null
+++ b/pym/portage_db_template.py
@@ -0,0 +1,174 @@
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/Attic/portage_db_template.py,v 1.11.2.1 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: portage_db_template.py,v 1.11.2.1 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+import os.path,string
+from portage_util import getconfig, ReadOnlyConfig
+from portage_exception import CorruptionError
+
+class database:
+ def __init__(self,path,category,dbkeys,uid,gid,config_path="/etc/portage/module_configs/"):
+ self.__cacheArray = [None, None, None]
+ self.__cacheKeyArray = [None, None, None]
+ self.__template_init_called = True
+ self.path = path
+ self.category = category
+ self.dbkeys = dbkeys
+ self.uid = uid
+ self.gid = gid
+
+ self.config = None
+ self.__load_config(config_path)
+
+ self.module_init()
+
+ def getModuleName(self):
+ return self.__module__+"."+self.__class__.__name__[:]
+
+ def __load_config(self,config_path):
+ config_file = config_path + "/" + self.getModuleName()
+ self.config = ReadOnlyConfig(config_file)
+
+ def __check_init(self):
+ try:
+ if self.__template_init_called:
+ pass
+ except SystemExit, e:
+ raise
+ except:
+ raise NotImplementedError("db_template.__init__ was overridden")
+
+ def check_key(self,key):
+ if (not key) or not isinstance(key, str):
+ raise KeyError, "No key provided. key: %s" % (key)
+
+ def clear(self):
+ for x in self.keys():
+ self.del_key(x)
+
+ def __addCache(self,key,val):
+ del self.__cacheArray[2]
+ self.__cacheArray.insert(0,val)
+ del self.__cacheKeyArray[2]
+ self.__cacheKeyArray.insert(0,key)
+
+ def __delCache(self,key):
+ i = self.__cacheKeyArray.index(key)
+ self.__cacheArray[i] = None
+ self.__cacheKeyArray[i] = None
+
+ def flushCache(self):
+ self.__cacheArray = [None, None, None]
+ self.__cacheKeyArray = [None, None, None]
+
+ def __getitem__(self,key):
+ if key in self.__cacheKeyArray:
+ i = self.__cacheKeyArray.index(key)
+ return self.__cacheArray[i]
+
+ self.check_key(key)
+ if self.has_key(key):
+ try:
+ values = self.get_values(key)
+ self.__addCache(key,values)
+ return values
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ raise CorruptionError("Corruption detected when reading key '%s': %s" % (key,str(e)))
+ raise KeyError("Key not in db: '%s'" % (key))
+
+ def __setitem__(self,key,values):
+ self.check_key(key)
+ self.__addCache(key,values)
+ return self.set_values(key,values)
+
+ def __delitem__(self,key):
+ self.__delCache(key)
+ return self.del_key(key)
+
+ def has_key(self,key):
+ raise NotImplementedError("Method not defined")
+
+ def keys(self):
+ raise NotImplementedError("Method not defined")
+
+ def get_values(self,key):
+ raise NotImplementedError("Method not defined")
+
+ def set_values(self,key,val):
+ raise NotImplementedError("Method not defined")
+
+ def del_key(self,key):
+ raise NotImplementedError("Method not defined")
+
+ def sync(self):
+ raise NotImplementedError("Method not defined")
+
+ def close(self):
+ raise NotImplementedError("Method not defined")
+
+
+
+def test_database(db_class,path,category,dbkeys,uid,gid):
+ if "_mtime_" not in dbkeys:
+ dbkeys+=["_mtime_"]
+ d = db_class(path,category,dbkeys,uid,gid)
+
+ print "Module: "+str(d.__module__)
+
+ # XXX: Need a way to do this that actually works.
+ for x in dir(database):
+ if x not in dir(d):
+ print "FUNCTION MISSING:",str(x)
+
+ list = d.keys()
+ if(len(list) == 0):
+ values = {}
+ for x in dbkeys:
+ values[x] = x[:]
+ values["_mtime_"] = "1079903037"
+ d.set_values("test-2.2.3-r1", values)
+ d.set_values("test-2.2.3-r2", values)
+ d.set_values("test-2.2.3-r3", values)
+ d.set_values("test-2.2.3-r4", values)
+
+ list = d.keys()
+ print "Key count:",len(list)
+
+ values = d.get_values(list[0])
+ print "value count:",len(values)
+
+ mykey = "foobar-1.2.3-r4"
+
+ d.check_key(mykey)
+ d.set_values(mykey, values)
+ d.sync()
+ del d
+
+ d = db_class(path,category,dbkeys,uid,gid)
+ new_vals = d.get_values(mykey)
+
+ if dbkeys and new_vals:
+ for x in dbkeys:
+ if x not in new_vals.keys():
+ print "---",x
+ for x in new_vals.keys():
+ if x not in dbkeys:
+ print "+++",x
+ else:
+ print "Mismatched:",dbkeys,new_vals
+
+ d.del_key(mykey)
+
+ print "Should be None:",d.get_values(mykey)
+
+ d.clear()
+
+	d.sync()
+	d.close()
+
+ del d
+
+ print "Done."
diff --git a/pym/portage_db_test.py b/pym/portage_db_test.py
new file mode 100644
index 000000000..d1a7ce5e4
--- /dev/null
+++ b/pym/portage_db_test.py
@@ -0,0 +1,21 @@
+#!/usr/bin/python -O
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/Attic/portage_db_test.py,v 1.3.2.1 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: portage_db_test.py,v 1.3.2.1 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+import portage
+import portage_db_template
+import portage_db_anydbm
+import portage_db_flat
+import portage_db_cpickle
+
+import os
+
+uid = os.getuid()
+gid = os.getgid()
+
+portage_db_template.test_database(portage_db_flat.database,"/var/cache/edb/dep", "sys-apps",portage.auxdbkeys,uid,gid)
+portage_db_template.test_database(portage_db_cpickle.database,"/var/cache/edb/dep","sys-apps",portage.auxdbkeys,uid,gid)
+portage_db_template.test_database(portage_db_anydbm.database,"/var/cache/edb/dep", "sys-apps",portage.auxdbkeys,uid,gid)
+
diff --git a/pym/portage_dep.py b/pym/portage_dep.py
new file mode 100644
index 000000000..3e7f2e8c6
--- /dev/null
+++ b/pym/portage_dep.py
@@ -0,0 +1,155 @@
+# deps.py -- Portage dependency resolution functions
+# Copyright 2003-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_dep.py,v 1.15.2.3 2005/04/02 14:07:59 jstubbs Exp $
+cvs_id_string="$Id: portage_dep.py,v 1.15.2.3 2005/04/02 14:07:59 jstubbs Exp $"[5:-2]
+
+# DEPEND SYNTAX:
+#
+# 'use?' only affects the immediately following word!
+# Nesting is the only legal way to form multiple '[!]use?' requirements.
+#
+# Where: 'a' and 'b' are use flags, and 'z' is a depend atom.
+#
+# "a? z" -- If 'a' in [use], then b is valid.
+# "a? ( z )" -- Syntax with parenthesis.
+# "a? b? z" -- Deprecated.
+# "a? ( b? z )" -- Valid
+# "a? ( b? ( z ) ) -- Valid
+#
+
+import os,string,types,sys,copy
+import portage_exception
+
+def strip_empty(myarr):
+ for x in range(len(myarr)-1, -1, -1):
+ if not myarr[x]:
+ del myarr[x]
+ return myarr
+
+def paren_reduce(mystr,tokenize=1):
+	"Accepts a DEPEND-style string and converts '(' and ')' surrounded items to sub-lists"
+ mylist = []
+ while mystr:
+ if ("(" not in mystr) and (")" not in mystr):
+ freesec = mystr
+ subsec = None
+ tail = ""
+ elif mystr[0] == ")":
+ return [mylist,mystr[1:]]
+ elif ("(" in mystr) and (mystr.index("(") < mystr.index(")")):
+ freesec,subsec = mystr.split("(",1)
+ subsec,tail = paren_reduce(subsec,tokenize)
+ else:
+ subsec,tail = mystr.split(")",1)
+ if tokenize:
+ subsec = strip_empty(subsec.split(" "))
+ return [mylist+subsec,tail]
+ return mylist+[subsec],tail
+ mystr = tail
+ if freesec:
+ if tokenize:
+ mylist = mylist + strip_empty(freesec.split(" "))
+ else:
+ mylist = mylist + [freesec]
+ if subsec is not None:
+ mylist = mylist + [subsec]
+ return mylist
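+
+# Illustrative sketch (not part of the original source):
+#
+#   paren_reduce("a? ( b/x c/y ) d/z")
+#     => ['a?', ['b/x', 'c/y'], 'd/z']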
+
+def use_reduce(deparray, uselist=[], masklist=[], matchall=0, excludeall=[]):
+ """Takes a paren_reduce'd array and reduces the use? conditionals out
+ leaving an array with subarrays
+ """
+ # Quick validity checks
+ for x in range(1,len(deparray)):
+ if deparray[x] in ["||","&&"]:
+ if len(deparray) == x:
+ # Operator is the last element
+ raise portage_exception.InvalidDependString("INVALID "+deparray[x]+" DEPEND STRING: "+str(deparray))
+ if type(deparray[x+1]) != types.ListType:
+ # Operator is not followed by a list
+ raise portage_exception.InvalidDependString("INVALID "+deparray[x]+" DEPEND STRING: "+str(deparray))
+	if deparray and deparray[-1] and deparray[-1][-1] == "?":
+		# Conditional with no target
+		raise portage_exception.InvalidDependString("INVALID '?' DEPEND STRING: "+str(deparray))
+
+ #XXX: Compatibility -- Still required?
+ if ("*" in uselist):
+ matchall=1
+
+ mydeparray = deparray[:]
+ rlist = []
+ while mydeparray:
+ head = mydeparray.pop(0)
+
+ if type(head) == types.ListType:
+ rlist = rlist + [use_reduce(head, uselist, masklist, matchall, excludeall)]
+
+ else:
+ if head[-1] == "?": # Use reduce next group on fail.
+ # Pull any other use conditions and the following atom or list into a separate array
+ newdeparray = [head]
+ while isinstance(newdeparray[-1], str) and newdeparray[-1][-1] == "?":
+ if mydeparray:
+ newdeparray.append(mydeparray.pop(0))
+ else:
+ raise ValueError, "Conditional with no target."
+
+ # Deprecation checks
+ warned = 0
+ if len(newdeparray[-1]) == 0:
+ sys.stderr.write("Note: Empty target in string. (Deprecated)\n")
+ warned = 1
+ if len(newdeparray) != 2:
+ sys.stderr.write("Note: Nested use flags without parenthesis (Deprecated)\n")
+ warned = 1
+ if warned:
+ sys.stderr.write(" --> "+string.join(map(str,[head]+newdeparray))+"\n")
+
+ # Check that each flag matches
+ ismatch = True
+ for head in newdeparray[:-1]:
+ head = head[:-1]
+ if head[0] == "!":
+ head = head[1:]
+ if not matchall and head in uselist or head in excludeall:
+ ismatch = False
+ break
+ elif head not in masklist:
+ if not matchall and head not in uselist:
+ ismatch = False
+ break
+ else:
+ ismatch = False
+
+ # If they all match, process the target
+ if ismatch:
+ target = newdeparray[-1]
+ if isinstance(target, list):
+ rlist += [use_reduce(target, uselist, masklist, matchall, excludeall)]
+ else:
+ rlist += [target]
+
+ else:
+ rlist += [head]
+
+ return rlist
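+
+# Illustrative sketch (not part of the original source):
+#
+#   use_reduce(paren_reduce("a? ( b/x ) c/y"), uselist=["a"])
+#     => [['b/x'], 'c/y']
+#   use_reduce(paren_reduce("a? ( b/x ) c/y"))
+#     => ['c/y']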
+
+
+def dep_opconvert(deplist):
+ """Move || and && to the beginning of the following arrays"""
+ # Hack in management of the weird || for dep_wordreduce, etc.
+ # dep_opconvert: [stuff, ["||", list, of, things]]
+ # At this point: [stuff, "||", [list, of, things]]
+ retlist = []
+ x = 0
+ while x != len(deplist):
+ if isinstance(deplist[x], list):
+ retlist.append(dep_opconvert(deplist[x]))
+ elif deplist[x] == "||" or deplist[x] == "&&":
+ retlist.append([deplist[x]] + dep_opconvert(deplist[x+1]))
+ x += 1
+ else:
+ retlist.append(deplist[x])
+ x += 1
+ return retlist
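+
+# Illustrative sketch (not part of the original source):
+#
+#   dep_opconvert(['a/x', '||', ['b/y', 'c/z']])
+#     => ['a/x', ['||', 'b/y', 'c/z']]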
diff --git a/pym/portage_exception.py b/pym/portage_exception.py
new file mode 100644
index 000000000..b37f7caa4
--- /dev/null
+++ b/pym/portage_exception.py
@@ -0,0 +1,163 @@
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_exception.py,v 1.8.2.1 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: portage_exception.py,v 1.8.2.1 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+class PortageException(Exception):
+ """General superclass for portage exceptions"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class CorruptionError(PortageException):
+ """Corruption indication"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class InvalidDependString(PortageException):
+ """An invalid depend string has been encountered"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class InvalidVersionString(PortageException):
+ """An invalid version string has been encountered"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class SecurityViolation(PortageException):
+ """An incorrect formatting was passed instead of the expected one"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class IncorrectParameter(PortageException):
+	"""A parameter of the wrong type was passed"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class MissingParameter(PortageException):
+	"""A parameter is required for the requested action but was not passed"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+
+class InvalidData(PortageException):
+ """An incorrect formatting was passed instead of the expected one"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class InvalidDataType(PortageException):
+ """An incorrect type was passed instead of the expected one"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+
+
+
+class InvalidLocation(PortageException):
+ """Data was not found when it was expected to exist or was specified incorrectly"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class FileNotFound(InvalidLocation):
+ """A file was not found when it was expected to exist"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class DirectoryNotFound(InvalidLocation):
+ """A directory was not found when it was expected to exist"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+
+
+class CommandNotFound(PortageException):
+ """A required binary was not available or executable"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+
+
+class PortagePackageException(PortageException):
+ """Malformed or missing package data"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class PackageNotFound(PortagePackageException):
+ """Missing Ebuild or Binary"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class InvalidPackageName(PortagePackageException):
+ """Malformed package name"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+
+
+
+class SignatureException(PortageException):
+ """Signature was not present in the checked file"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class DigestException(SignatureException):
+ """A problem exists in the digest"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class MissingSignature(SignatureException):
+ """Signature was not present in the checked file"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class InvalidSignature(SignatureException):
+ """Signature was checked and was not a valid, current, nor trusted signature"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
+class UntrustedSignature(SignatureException):
+ """Signature was not certified to the desired security level"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ return repr(self.value)
+
diff --git a/pym/portage_exec.py b/pym/portage_exec.py
new file mode 100644
index 000000000..c613ab235
--- /dev/null
+++ b/pym/portage_exec.py
@@ -0,0 +1,215 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_exec.py,v 1.13.2.4 2005/04/17 09:01:56 jstubbs Exp $
+cvs_id_string="$Id: portage_exec.py,v 1.13.2.4 2005/04/17 09:01:56 jstubbs Exp $"[5:-2]
+
+import os,types,atexit,string,stat
+import signal
+import portage_data
+import portage_util
+
+try:
+ import resource
+	max_fd_limit=resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+except SystemExit, e:
+ raise
+except:
+ # hokay, no resource module.
+ max_fd_limit=256
+
+spawned_pids = []
+def cleanup():
+ global spawned_pids
+ while spawned_pids:
+ pid = spawned_pids.pop()
+ try:
+			os.kill(pid,signal.SIGKILL)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+atexit.register(cleanup)
+
+from portage_const import BASH_BINARY,SANDBOX_BINARY,SANDBOX_PIDS_FILE
+
+sandbox_capable = (os.path.exists(SANDBOX_BINARY) and os.access(SANDBOX_BINARY, os.X_OK))
+
+def spawn_bash(mycommand,env={},debug=False,opt_name=None,**keywords):
+ args=[BASH_BINARY]
+ if not opt_name:
+ opt_name=mycommand.split()[0]
+ if not env.has_key("BASH_ENV"):
+ env["BASH_ENV"] = "/etc/spork/is/not/valid/profile.env"
+ if debug:
+ args.append("-x")
+ args.append("-c")
+ args.append(mycommand)
+ return spawn(args,env=env,opt_name=opt_name,**keywords)
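+
+# Illustrative usage sketch (not part of the original source):
+#
+#   retval = spawn_bash("ls /var/db/pkg > /dev/null")
+#   if retval != 0:
+#       print "command failed:", retval
+#
+# Per spawn() below, the return value is the exit code, or the signal
+# number shifted left by 8 if the child died from a signal.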
+
+def spawn_sandbox(mycommand,uid=None,opt_name=None,**keywords):
+ if not sandbox_capable:
+ return spawn_bash(mycommand,opt_name=opt_name,**keywords)
+ args=[SANDBOX_BINARY]
+ if not opt_name:
+ opt_name=mycommand.split()[0]
+ args.append(mycommand)
+ if not uid:
+ uid=os.getuid()
+ try:
+ os.chown(SANDBOX_PIDS_FILE,uid,portage_data.portage_gid)
+ os.chmod(SANDBOX_PIDS_FILE,0664)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ return spawn(args,uid=uid,opt_name=opt_name,**keywords)
+
+# base spawn function
+def spawn(mycommand,env={},opt_name=None,fd_pipes=None,returnpid=False,uid=None,gid=None,groups=None,umask=None,logfile=None,path_lookup=True):
+ if type(mycommand)==types.StringType:
+ mycommand=mycommand.split()
+ myc = mycommand[0]
+ if not os.access(myc, os.X_OK):
+ if not path_lookup:
+ return None
+ myc = find_binary(myc)
+ if myc == None:
+ return None
+
+ mypid=[]
+ if logfile:
+ pr,pw=os.pipe()
+ mypid.extend(spawn(('tee','-i','-a',logfile),returnpid=True,fd_pipes={0:pr,1:1,2:2}))
+ retval=os.waitpid(mypid[-1],os.WNOHANG)[1]
+ if retval != 0:
+ # he's dead jim.
+ if (retval & 0xff)==0:
+ return (retval >> 8) # exit code
+ else:
+ return ((retval & 0xff) << 8) # signal
+ if not fd_pipes:
+ fd_pipes={}
+ fd_pipes[0] = 0
+ fd_pipes[1]=pw
+ fd_pipes[2]=pw
+
+ if not opt_name:
+ opt_name = mycommand[0]
+ myargs=[opt_name]
+ myargs.extend(mycommand[1:])
+ mypid.append(os.fork())
+ if mypid[-1] == 0:
+ # this may look ugly, but basically it moves file descriptors around to ensure no
+ # handles that are needed are accidentally closed during the final dup2 calls.
+ trg_fd=[]
+ if type(fd_pipes)==types.DictType:
+ src_fd=[]
+ k=fd_pipes.keys()
+ k.sort()
+ for x in k:
+ trg_fd.append(x)
+ src_fd.append(fd_pipes[x])
+ for x in range(0,len(trg_fd)):
+ if trg_fd[x] == src_fd[x]:
+ continue
+ if trg_fd[x] in src_fd[x+1:]:
+					new=max(src_fd) + 1
+					os.dup2(trg_fd[x],new)
+ os.close(trg_fd[x])
+ try:
+ while True:
+							src_fd[src_fd.index(trg_fd[x])]=new
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ for x in range(0,len(trg_fd)):
+ if trg_fd[x] != src_fd[x]:
+ os.dup2(src_fd[x], trg_fd[x])
+ else:
+ trg_fd=[0,1,2]
+ for x in range(0,max_fd_limit):
+ if x not in trg_fd:
+ try:
+ os.close(x)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+ # note this order must be preserved- can't change gid/groups if you change uid first.
+ if gid:
+ os.setgid(gid)
+ if groups:
+ os.setgroups(groups)
+ if uid:
+ os.setuid(uid)
+ if umask:
+ os.umask(umask)
+ try:
+ # XXX: We would do this to stop ebuild.sh from getting any
+ # XXX: output, and consequently, we'd get to handle the sigINT.
+ #os.close(sys.stdin.fileno())
+ pass
+ except SystemExit, e:
+ raise
+ except:
+ pass
+
+ try:
+ #print "execing", myc, myargs
+ os.execve(myc,myargs,env)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+			raise Exception(str(e)+":\n  "+myc+" "+string.join(myargs))
+ # If the execve fails, we need to report it, and exit
+ # *carefully* --- report error here
+ os._exit(1)
+ return # should never get reached
+
+ if logfile:
+ os.close(pr)
+ os.close(pw)
+
+ if returnpid:
+ global spawned_pids
+ spawned_pids.append(mypid[-1])
+ return mypid
+ while len(mypid):
+ retval=os.waitpid(mypid[-1],0)[1]
+ if retval != 0:
+ for x in mypid[0:-1]:
+ try:
+ os.kill(x,signal.SIGTERM)
+ if os.waitpid(x,os.WNOHANG)[1] == 0:
+ # feisty bugger, still alive.
+ os.kill(x,signal.SIGKILL)
+ os.waitpid(x,0)
+ except OSError, oe:
+ if oe.errno not in (10,3):
+ raise oe
+
+ # at this point we've killed all other kid pids generated via this call.
+ # return now.
+
+ if (retval & 0xff)==0:
+ return (retval >> 8) # return exit code
+ else:
+ return ((retval & 0xff) << 8) # interrupted by signal
+ else:
+ mypid.pop(-1)
+ return 0
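+
+# Illustrative sketch (not part of the original source): fd_pipes maps
+# child file descriptors to parent ones. For example, discarding stdout
+# while inheriting stdin/stderr:
+#
+#   devnull = os.open("/dev/null", os.O_WRONLY)
+#   spawn("/bin/ls", env={}, fd_pipes={0:0, 1:devnull, 2:2})
+#   os.close(devnull)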
+
+def find_binary(myc):
+ p=os.getenv("PATH")
+ if p == None:
+ return None
+ for x in p.split(":"):
+ # if it exists, and is executable
+ if os.access("%s/%s" % (x,myc), os.X_OK):
+ return "%s/%s" % (x,myc)
+
+ return None
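+
+# Illustrative usage sketch (not part of the original source):
+#
+#   mytar = find_binary("tar")   # e.g. "/bin/tar", or None if not in PATH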
+
+
diff --git a/pym/portage_file.py b/pym/portage_file.py
new file mode 100644
index 000000000..5cee6458b
--- /dev/null
+++ b/pym/portage_file.py
@@ -0,0 +1,62 @@
+# portage_data.py -- Calculated/Discovered Data Values
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_file.py,v 1.3.2.1 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: portage_file.py,v 1.3.2.1 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+import os
+import string
+import portage_data
+import portage_util
+import portage_exception
+from portage_localization import _
+
+def normpath(mypath):
+ newpath = os.path.normpath(mypath)
+ if len(newpath) > 1:
+ if newpath[:2] == "//":
+ newpath = newpath[1:]
+ return newpath
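+
+# Illustrative sketch (not part of the original source): unlike plain
+# os.path.normpath, a leading "//" (which POSIX permits to be kept) is
+# collapsed to a single "/":
+#
+#   normpath("//usr//lib/")   => "/usr/lib"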
+
+
+def makedirs(path, perms=0755, uid=None, gid=None, must_chown=False):
+ old_umask = os.umask(0)
+ if(uid == None):
+ uid = portage_data.portage_uid
+ if(gid == None):
+ gid = portage_data.portage_gid
+	if not path:
+		raise portage_exception.IncorrectParameter, _("Invalid path: type: '%(type)s' value: '%(path)s'") % {"path": path, "type": type(path)}
+	if (perms > 1535) or (perms == 0):
+		raise portage_exception.IncorrectParameter, _("Invalid permissions passed. Value is octal and no higher than 02777.")
+
+	mypath = normpath(path)
+	dirs = string.split(mypath, "/")
+
+ mypath = ""
+ if dirs and dirs[0] == "":
+ mypath = "/"
+ dirs = dirs[1:]
+ for x in dirs:
+ mypath += x+"/"
+ if not os.path.exists(mypath):
+			os.mkdir(mypath, perms)
+ try:
+ os.chown(mypath, uid, gid)
+ except SystemExit, e:
+ raise
+ except:
+ if must_chown:
+ os.umask(old_umask)
+ raise
+ portage_util.writemsg(_("Failed to chown: %(path)s to %(uid)s:%(gid)s\n") % {"path":mypath,"uid":uid,"gid":gid})
+
+ os.umask(old_umask)
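+
+# Illustrative usage sketch (not part of the original source), assuming
+# the parameter fixes above:
+#
+#   makedirs("/var/tmp/portage/foo", perms=0775)
+#
+# creates each missing component with mode 0775 and chowns it to the
+# portage user/group, raising only if must_chown=True and chown fails.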
+
+
+
+
+
+
+
+
+
+
diff --git a/pym/portage_gpg.py b/pym/portage_gpg.py
new file mode 100644
index 000000000..deaf36ad4
--- /dev/null
+++ b/pym/portage_gpg.py
@@ -0,0 +1,149 @@
+# portage_gpg.py -- core Portage functionality
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_gpg.py,v 1.6.2.1 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: portage_gpg.py,v 1.6.2.1 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+import os
+import copy
+import types
+import commands
+import portage_exception
+import portage_checksum
+
+GPG_BINARY = "/usr/bin/gpg"
+GPG_OPTIONS = " --lock-never --no-random-seed-file --no-greeting --no-sig-cache "
+GPG_VERIFY_FLAGS = " --verify "
+GPG_KEYDIR = " --homedir '%s' "
+GPG_KEYRING = " --keyring '%s' "
+
+UNTRUSTED = 0
+EXISTS = UNTRUSTED + 1
+MARGINAL = EXISTS + 1
+TRUSTED = MARGINAL + 1
+
+def fileStats(filepath):
+ mya = []
+ for x in os.stat(filepath):
+ mya.append(x)
+ mya.append(portage_checksum.perform_checksum(filepath))
+ return mya
+
+
+class FileChecker:
+ def __init__(self,keydir=None,keyring=None,requireSignedRing=False,minimumTrust=EXISTS):
+ self.minimumTrust = TRUSTED # Default we require trust. For rings.
+ self.keydir = None
+ self.keyring = None
+ self.keyringPath = None
+ self.keyringStats = None
+ self.keyringIsTrusted = False
+
+ if (keydir != None):
+ # Verify that the keydir is valid.
+ if type(keydir) != types.StringType:
+ raise portage_exception.InvalidDataType, "keydir argument: %s" % keydir
+ if not os.path.isdir(keydir):
+ raise portage_exception.DirectoryNotFound, "keydir: %s" % keydir
+ self.keydir = copy.deepcopy(keydir)
+
+ if (keyring != None):
+ # Verify that the keyring is a valid filename and exists.
+ if type(keyring) != types.StringType:
+ raise portage_exception.InvalidDataType, "keyring argument: %s" % keyring
+ if keyring.find("/") != -1:
+ raise portage_exception.InvalidData, "keyring: %s" % keyring
+ pathname = ""
+ if keydir:
+ pathname = keydir + "/" + keyring
+ if not os.path.isfile(pathname):
+ raise portage_exception.FileNotFound, "keyring missing: %s (dev.gentoo.org/~carpaski/gpg/)" % pathname
+
+ keyringPath = keydir+"/"+keyring
+
+		if (not keyring or not keyringPath) and requireSignedRing:
+ raise portage_exception.MissingParameter
+
+ self.keyringStats = fileStats(keyringPath)
+ self.minimumTrust = TRUSTED
+ if not self.verify(keyringPath, keyringPath+".asc"):
+ self.keyringIsTrusted = False
+ if requireSignedRing:
+ raise portage_exception.InvalidSignature, "Required keyring verification: "+keyringPath
+ else:
+ self.keyringIsTrusted = True
+
+ self.keyring = copy.deepcopy(keyring)
+ self.keyringPath = self.keydir+"/"+self.keyring
+ self.minimumTrust = minimumTrust
+
+ def _verifyKeyring(self):
+ if self.keyringStats and self.keyringPath:
+ new_stats = fileStats(self.keyringPath)
+ if new_stats != self.keyringStats:
+ raise portage_exception.SecurityViolation, "GPG keyring changed!"
+
+ def verify(self, filename, sigfile=None):
+ """Uses minimumTrust to determine if it is Valid/True or Invalid/False"""
+ self._verifyKeyring()
+
+ if not os.path.isfile(filename):
+ raise portage_exception.FileNotFound, filename
+
+ if sigfile and not os.path.isfile(sigfile):
+ raise portage_exception.FileNotFound, sigfile
+
+ if self.keydir and not os.path.isdir(self.keydir):
+ raise portage_exception.DirectoryNotFound, filename
+
+ if self.keyringPath:
+ if not os.path.isfile(self.keyringPath):
+ raise portage_exception.FileNotFound, self.keyringPath
+
+ if not os.path.isfile(filename):
+ raise portage_exception.CommandNotFound, filename
+
+ command = GPG_BINARY + GPG_VERIFY_FLAGS + GPG_OPTIONS
+ if self.keydir:
+ command += GPG_KEYDIR % (self.keydir)
+ if self.keyring:
+ command += GPG_KEYRING % (self.keyring)
+
+ if sigfile:
+ command += " '"+sigfile+"'"
+ command += " '"+filename+"'"
+
+ result,output = commands.getstatusoutput(command)
+
+ signal = result & 0xff
+ result = (result >> 8)
+
+ if signal:
+		# "SignalCaught" is undefined in this tree; OSError is a stand-in so
+		# that a gpg process killed by a signal is still reported.
+		raise OSError, "gpg exited on signal: %d" % (signal)
+
+ trustLevel = UNTRUSTED
+ if result == 0:
+ trustLevel = TRUSTED
+ #if output.find("WARNING") != -1:
+ # trustLevel = MARGINAL
+ if output.find("BAD") != -1:
+ raise portage_exception.InvalidSignature, filename
+ elif result == 1:
+ trustLevel = EXISTS
+ if output.find("BAD") != -1:
+ raise portage_exception.InvalidSignature, filename
+ elif result == 2:
+ trustLevel = UNTRUSTED
+ if output.find("could not be verified") != -1:
+ raise portage_exception.MissingSignature, filename
+ if output.find("public key not found") != -1:
+ if self.keyringIsTrusted: # We trust the ring, but not the key specifically.
+ trustLevel = MARGINAL
+ else:
+ raise portage_exception.InvalidSignature, filename+" (Unknown Signature)"
+ else:
+ raise portage_exception.UnknownCondition, "GPG returned unknown result: %d" % (result)
+
+ if trustLevel >= self.minimumTrust:
+ return True
+ return False
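
A minimal sketch of driving FileChecker (hedged: the keydir, keyring name, and
target paths are hypothetical, and the keyring's detached signature
"gentoo.gpg.asc" must sit next to it or the constructor's self-check raises
FileNotFound):

    import portage_gpg
    checker = portage_gpg.FileChecker(keydir="/etc/portage/gpg",
        keyring="gentoo.gpg", requireSignedRing=False,
        minimumTrust=portage_gpg.MARGINAL)
    if checker.verify("/usr/portage/metadata/timestamp.x",
            "/usr/portage/metadata/timestamp.x.asc"):
        print "signature acceptable at MARGINAL trust or better"
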
diff --git a/pym/portage_localization.py b/pym/portage_localization.py
new file mode 100644
index 000000000..8aafc5107
--- /dev/null
+++ b/pym/portage_localization.py
@@ -0,0 +1,21 @@
+# portage_localization.py -- Code to manage/help portage localization.
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_localization.py,v 1.2.2.1 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: portage_localization.py,v 1.2.2.1 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+# We define this to make the transition easier for us.
+def _(mystr):
+ return mystr
+
+
+def localization_example():
+ # Dict references allow translators to rearrange word order.
+ print _("You can use this string for translating.")
+ print _("Strings can be formatted with %(mystr)s like this.") % {"mystr": "VALUES"}
+
+ a_value = "value.of.a"
+ b_value = 123
+ c_value = [1,2,3,4]
+ print _("A: %(a)s -- B: %(b)s -- C: %(c)s") % {"a":a_value,"b":b_value,"c":c_value}
+
diff --git a/pym/portage_locks.py b/pym/portage_locks.py
new file mode 100644
index 000000000..cf248e4b6
--- /dev/null
+++ b/pym/portage_locks.py
@@ -0,0 +1,360 @@
+# portage: Lock management code
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_locks.py,v 1.18.2.2 2005/01/16 02:35:33 carpaski Exp $
+cvs_id_string="$Id: portage_locks.py,v 1.18.2.2 2005/01/16 02:35:33 carpaski Exp $"[5:-2]
+
+import atexit
+import errno
+import os
+import stat
+import string
+import time
+import types
+import portage_exception
+import portage_file
+import portage_util
+import portage_data
+from portage_localization import _
+
+HARDLINK_FD = -2
+
+hardlock_path_list = []
+def clean_my_hardlocks():
+ for x in hardlock_path_list:
+ hardlock_cleanup(x)
+def add_hardlock_file_to_cleanup(path):
+ mypath = portage_file.normpath(path)
+ if os.path.isfile(mypath):
+ mypath = os.path.dirname(mypath)
+ if os.path.isdir(mypath):
+		hardlock_path_list.append(mypath)
+
+atexit.register(clean_my_hardlocks)
+
+def lockdir(mydir):
+ return lockfile(mydir,wantnewlockfile=1)
+def unlockdir(mylock):
+ return unlockfile(mylock)
+
+def lockfile(mypath,wantnewlockfile=0,unlinkfile=0):
+	"""Locks the given path (a file, directory, or open fd). With
+	wantnewlockfile set, the lock is taken on a new file named
+	path+'.portage_lockfile'."""
+ import fcntl
+
+ if not mypath:
+ raise portage_exception.InvalidData, "Empty path given"
+
+ if type(mypath) == types.StringType and mypath[-1] == '/':
+ mypath = mypath[:-1]
+
+ if type(mypath) == types.FileType:
+ mypath = mypath.fileno()
+ if type(mypath) == types.IntType:
+ lockfilename = mypath
+ wantnewlockfile = 0
+ unlinkfile = 0
+ elif wantnewlockfile:
+ lockfilename = mypath+".portage_lockfile"
+ unlinkfile = 1
+ else:
+ lockfilename = mypath
+
+ if type(mypath) == types.StringType:
+ if not os.path.exists(os.path.dirname(mypath)):
+ raise portage_exception.DirectoryNotFound, os.path.dirname(mypath)
+ if not os.path.exists(lockfilename):
+ old_mask=os.umask(000)
+ myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR,0660)
+ try:
+ if os.stat(lockfilename).st_gid != portage_data.portage_gid:
+ os.chown(lockfilename,os.getuid(),portage_data.portage_gid)
+ except SystemExit, e:
+ raise
+ except OSError, e:
+			if e.errno == errno.ENOENT: # No such file or directory
+ return lockfile(mypath,wantnewlockfile,unlinkfile)
+ else:
+ portage_util.writemsg("Cannot chown a lockfile. This could cause inconvenience later.\n");
+ os.umask(old_mask)
+ else:
+ myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR,0660)
+
+ elif type(mypath) == types.IntType:
+ myfd = mypath
+
+ else:
+ raise ValueError, "Unknown type passed in '%s': '%s'" % (type(mypath),mypath)
+
+	# Try a non-blocking lock first; if it is already held, print a message
+	# saying we are waiting, then fall back to a blocking attempt.
+ locking_method = fcntl.lockf
+ try:
+ fcntl.lockf(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except IOError, e:
+ if "errno" not in dir(e):
+ raise
+ if e.errno == errno.EAGAIN:
+ # resource temp unavailable; eg, someone beat us to the lock.
+ if type(mypath) == types.IntType:
+ print "waiting for lock on fd %i" % myfd
+ else:
+ print "waiting for lock on %s" % lockfilename
+ # try for the exclusive lock now.
+ fcntl.lockf(myfd,fcntl.LOCK_EX)
+ elif e.errno == errno.ENOLCK:
+ # We're not allowed to lock on this FS.
+ os.close(myfd)
+ link_success = False
+ if lockfilename == str(lockfilename):
+ if wantnewlockfile:
+ try:
+ if os.stat(lockfilename)[stat.ST_NLINK] == 1:
+ os.unlink(lockfilename)
+ except Exception, e:
+ pass
+ link_success = hardlink_lockfile(lockfilename)
+ if not link_success:
+ raise
+ locking_method = None
+ myfd = HARDLINK_FD
+ else:
+ raise
+
+
+ if type(lockfilename) == types.StringType and not os.path.exists(lockfilename):
+ # The file was deleted on us... Keep trying to make one...
+ os.close(myfd)
+ portage_util.writemsg("lockfile recurse\n",1)
+ lockfilename,myfd,unlinkfile,locking_method = lockfile(mypath,wantnewlockfile,unlinkfile)
+
+ portage_util.writemsg(str((lockfilename,myfd,unlinkfile))+"\n",1)
+ return (lockfilename,myfd,unlinkfile,locking_method)
+
+def unlockfile(mytuple):
+ import fcntl
+
+	#XXX: Compatibility hack.
+ if len(mytuple) == 3:
+ lockfilename,myfd,unlinkfile = mytuple
+ locking_method = fcntl.flock
+ elif len(mytuple) == 4:
+ lockfilename,myfd,unlinkfile,locking_method = mytuple
+ else:
+		raise portage_exception.InvalidData, "Invalid lock tuple: %s" % str(mytuple)
+
+ if(myfd == HARDLINK_FD):
+ unhardlink_lockfile(lockfilename)
+ return True
+
+ if type(lockfilename) == types.StringType and not os.path.exists(lockfilename):
+ portage_util.writemsg("lockfile does not exist '%s'\n" % lockfilename,1)
+ if (myfd != None) and type(lockfilename) == types.StringType:
+ os.close(myfd)
+ return False
+
+ try:
+ if myfd == None:
+ myfd = os.open(lockfilename, os.O_WRONLY,0660)
+ unlinkfile = 1
+ locking_method(myfd,fcntl.LOCK_UN)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ if type(lockfilename) == types.StringType:
+ os.close(myfd)
+ raise IOError, "Failed to unlock file '%s'\n" % lockfilename
+
+ try:
+ # This sleep call was added to allow other processes that are
+ # waiting for a lock to be able to grab it before it is deleted.
+		# lockfile() already accounts for this situation, however, and the
+		# sleep adds more time than it saves overall, so it is commented out
+		# until proven necessary.
+ #time.sleep(0.0001)
+ if unlinkfile:
+ locking_method(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ # We won the lock, so there isn't competition for it.
+ # We can safely delete the file.
+ portage_util.writemsg("Got the lockfile...\n",1)
+ #portage_util.writemsg("Unlinking...\n")
+ os.unlink(lockfilename)
+ portage_util.writemsg("Unlinked lockfile...\n",1)
+ locking_method(myfd,fcntl.LOCK_UN)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ # We really don't care... Someone else has the lock.
+ # So it is their problem now.
+ portage_util.writemsg("Failed to get lock... someone took it.\n",1)
+ portage_util.writemsg(str(e)+"\n",1)
+
+ # why test lockfilename? because we may have been handed an
+ # fd originally, and the caller might not like having their
+ # open fd closed automatically on them.
+ if type(lockfilename) == types.StringType:
+ os.close(myfd)
+
+ return True
+
+
+
+
+def hardlock_name(path):
+ return path+".hardlock-"+os.uname()[1]+"-"+str(os.getpid())
+
+def hardlink_active(lock):
+ if not os.path.exists(lock):
+ return False
+ # XXXXXXXXXXXXXXXXXXXXXXXXXX
+
+def hardlink_is_mine(link,lock):
+ try:
+ myhls = os.stat(link)
+ mylfs = os.stat(lock)
+ except SystemExit, e:
+ raise
+ except:
+ myhls = None
+ mylfs = None
+
+ if myhls:
+ if myhls[stat.ST_NLINK] == 2:
+ return True
+ if mylfs:
+ if mylfs[stat.ST_INO] == myhls[stat.ST_INO]:
+ return True
+ return False
+
+def hardlink_lockfile(lockfilename, max_wait=14400):
+	"""Does the NFS hardlink shuffle to ensure locking on the disk.
+	We create a PRIVATE lockfile, which is just a placeholder on the disk.
+	Then we HARDLINK the real lockfile to that private file.
+	If our file has 2 references, then we have the lock. :)
+	Otherwise we lather, rinse, and repeat.
+	We default to a 4 hour timeout.
+	"""
+
+ add_hardlock_file_to_cleanup(lockfilename)
+
+ start_time = time.time()
+ myhardlock = hardlock_name(lockfilename)
+ reported_waiting = False
+
+ while(time.time() < (start_time + max_wait)):
+ # We only need it to exist.
+ myfd = os.open(myhardlock, os.O_CREAT|os.O_RDWR,0660)
+ os.close(myfd)
+
+ if not os.path.exists(myhardlock):
+ raise portage_exception.FileNotFound, _("Created lockfile is missing: %(filename)s") % {"filename":myhardlock}
+
+ try:
+			os.link(myhardlock, lockfilename)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ #print "lockfile(): Hardlink: Link failed."
+ #print "Exception: ",e
+ pass
+
+ if hardlink_is_mine(myhardlock, lockfilename):
+ # We have the lock.
+ if reported_waiting:
+ print
+ return True
+
+ if reported_waiting:
+ portage_util.writemsg(".")
+ else:
+ reported_waiting = True
+ print
+ print "Waiting on (hardlink) lockfile: (one '.' per 3 seconds)"
+ print "This is a feature to prevent distfiles corruption."
+ print "/usr/lib/portage/bin/clean_locks can fix stuck locks."
+ print "Lockfile: " + lockfilename
+ time.sleep(3)
+
+ os.unlink(myhardlock)
+ return False
+
+def unhardlink_lockfile(lockfilename):
+ myhardlock = hardlock_name(lockfilename)
+ try:
+ if os.path.exists(myhardlock):
+ os.unlink(myhardlock)
+ if os.path.exists(lockfilename):
+ os.unlink(lockfilename)
+ except SystemExit, e:
+ raise
+ except:
+ portage_util.writemsg("Something strange happened to our hardlink locks.\n")
+
+def hardlock_cleanup(path, remove_all_locks=False):
+ mypid = str(os.getpid())
+ myhost = os.uname()[1]
+ mydl = os.listdir(path)
+
+ results = []
+ mycount = 0
+
+ mylist = {}
+ for x in mydl:
+ if os.path.isfile(path+"/"+x):
+ parts = string.split(x, ".hardlock-")
+ if len(parts) == 2:
+ filename = parts[0]
+ hostpid = string.split(parts[1],"-")
+ host = string.join(hostpid[:-1], "-")
+ pid = hostpid[-1]
+
+ if not mylist.has_key(filename):
+ mylist[filename] = {}
+ if not mylist[filename].has_key(host):
+ mylist[filename][host] = []
+ mylist[filename][host].append(pid)
+
+ mycount += 1
+
+
+ results.append("Found %(count)s locks" % {"count":mycount})
+
+ for x in mylist.keys():
+ if mylist[x].has_key(myhost) or remove_all_locks:
+ mylockname = hardlock_name(path+"/"+x)
+ if hardlink_is_mine(mylockname, path+"/"+x) or \
+ not os.path.exists(path+"/"+x) or \
+ remove_all_locks:
+ for y in mylist[x].keys():
+ for z in mylist[x][y]:
+ filename = path+"/"+x+".hardlock-"+y+"-"+z
+ if filename == mylockname:
+ continue
+ try:
+ # We're sweeping through, unlinking everyone's locks.
+ os.unlink(filename)
+ results.append(_("Unlinked: ") + filename)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ pass
+ try:
+ os.unlink(path+"/"+x)
+ results.append(_("Unlinked: ") + path+"/"+x)
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ pass
+ else:
+ try:
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ pass
+
+ return results
+
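
A typical lock/unlock cycle (hedged sketch; the path is hypothetical and only
its parent directory needs to exist):

    import portage_locks
    # Blocks until the lock is ours; returns a 4-tuple that must be handed
    # back to unlockdir()/unlockfile() unchanged.
    mylock = portage_locks.lockdir("/var/tmp/portage-demo")
    try:
        pass  # critical section: we own /var/tmp/portage-demo.portage_lockfile
    finally:
        portage_locks.unlockdir(mylock)
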
diff --git a/pym/portage_util.py b/pym/portage_util.py
new file mode 100644
index 000000000..ee1c38b3a
--- /dev/null
+++ b/pym/portage_util.py
@@ -0,0 +1,459 @@
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/portage_util.py,v 1.11.2.6 2005/04/23 07:26:04 jstubbs Exp $
+cvs_id_string="$Id: portage_util.py,v 1.11.2.6 2005/04/23 07:26:04 jstubbs Exp $"[5:-2]
+
+import sys,string,shlex,os.path
+import portage_exception
+# Assumption: isvalidatom, used by the grab*_package helpers below but never
+# defined in this file, comes from portage_dep.
+from portage_dep import isvalidatom
+
+noiselimit = 0
+def writemsg(mystr,noiselevel=0):
+ """Prints out warning and debug messages based on the noiselimit setting"""
+ global noiselimit
+ if noiselevel <= noiselimit:
+ sys.stderr.write(mystr)
+ sys.stderr.flush()
+
+def grabfile(myfilename, compat_level=0):
+ """This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
+ begins with a #, it is ignored, as are empty lines"""
+
+ try:
+ myfile=open(myfilename,"r")
+ except IOError:
+ return []
+ mylines=myfile.readlines()
+ myfile.close()
+ newlines=[]
+ for x in mylines:
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ myline=string.join(string.split(x))
+ if not len(myline):
+ continue
+ if myline[0]=="#":
+ # Check if we have a compat-level string. BC-integration data.
+ # '##COMPAT==>N<==' 'some string attached to it'
+ mylinetest = string.split(myline, "<==", 1)
+ if len(mylinetest) == 2:
+ myline_potential = mylinetest[1]
+ mylinetest = string.split(mylinetest[0],"##COMPAT==>")
+ if len(mylinetest) == 2:
+ if compat_level >= int(mylinetest[1]):
+ # It's a compat line, and the key matches.
+ newlines.append(myline_potential)
+ continue
+ else:
+ continue
+ newlines.append(myline)
+ return newlines
+
+def map_dictlist_vals(func,myDict):
+ """Performs a function on each value of each key in a dictlist.
+ Returns a new dictlist."""
+ new_dl = {}
+ for key in myDict.keys():
+ new_dl[key] = []
+ new_dl[key] = map(func,myDict[key])
+ return new_dl
+
+def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
+	"""Stacks an array of dict-of-list types into a single dict, optionally
+	merging or overwriting matching key/value pairs for the dict[key]->list.
+	Returns one dict; entries from higher-index dicts take precedence."""
+ final_dict = None
+ kill_list = {}
+ for mydict in original_dicts:
+ if mydict == None:
+ continue
+ if final_dict == None:
+ final_dict = {}
+ for y in mydict.keys():
+ if not final_dict.has_key(y):
+ final_dict[y] = []
+ if not kill_list.has_key(y):
+ kill_list[y] = []
+
+ mydict[y].reverse()
+ for thing in mydict[y]:
+ if thing and (thing not in kill_list[y]) and ("*" not in kill_list[y]):
+ if (incremental or (y in incrementals)) and thing[0] == '-':
+ if thing[1:] not in kill_list[y]:
+ kill_list[y] += [thing[1:]]
+ else:
+ if thing not in final_dict[y]:
+ final_dict[y].append(thing[:])
+ mydict[y].reverse()
+ if final_dict.has_key(y) and not final_dict[y]:
+ del final_dict[y]
+ return final_dict
+
+def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
+	"""Stacks an array of dict-of-string types into a single dict, optionally
+	merging or overwriting matching key/value pairs for the dict[key]->string.
+	Returns one dict."""
+ final_dict = None
+ for mydict in dicts:
+ if mydict == None:
+ if ignore_none:
+ continue
+ else:
+ return None
+ if final_dict == None:
+ final_dict = {}
+ for y in mydict.keys():
+ if mydict[y]:
+ if final_dict.has_key(y) and (incremental or (y in incrementals)):
+ final_dict[y] += " "+mydict[y][:]
+ else:
+ final_dict[y] = mydict[y][:]
+ mydict[y] = string.join(mydict[y].split()) # Remove extra spaces.
+ return final_dict
+
+def stack_lists(lists, incremental=1):
+ """Stacks an array of list-types into one array. Optionally removing
+ distinct values using '-value' notation. Higher index is preferenced."""
+ new_list = []
+ for x in lists:
+ for y in x:
+ if y:
+ if incremental and y[0]=='-':
+ while y[1:] in new_list:
+ del new_list[new_list.index(y[1:])]
+ else:
+ if y not in new_list:
+ new_list.append(y[:])
+ return new_list
+
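+# Example: with incremental=1 a later "-token" cancels an earlier "token",
+# so stack_lists([["a","b"], ["-b","c"]]) returns ["a", "c"].
+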
+def grab_multiple(basename, locations, handler, all_must_exist=0):
+ mylist = []
+ for x in locations:
+ mylist.append(handler(x+"/"+basename))
+ return mylist
+
+def grabdict(myfilename,juststrings=0,empty=0):
+ """This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary"""
+ newdict={}
+ try:
+ myfile=open(myfilename,"r")
+ except IOError,e:
+ return newdict
+ mylines=myfile.readlines()
+ myfile.close()
+ for x in mylines:
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ if x[0] == "#":
+ continue
+ myline=string.split(x)
+ if len(myline)<2 and empty==0:
+ continue
+ if len(myline)<1 and empty==1:
+ continue
+ if juststrings:
+ newdict[myline[0]]=string.join(myline[1:])
+ else:
+ newdict[myline[0]]=myline[1:]
+ return newdict
+
+def grabdict_package(myfilename,juststrings=0):
+ pkgs=grabdict(myfilename, juststrings, empty=1)
+ for x in pkgs.keys():
+ if not isvalidatom(x):
+ del(pkgs[x])
+ writemsg("--- Invalid atom in %s: %s\n" % (myfilename, x))
+ return pkgs
+
+def grabfile_package(myfilename,compatlevel=0):
+ pkgs=grabfile(myfilename,compatlevel)
+ for x in range(len(pkgs)-1,-1,-1):
+ pkg = pkgs[x]
+ if pkg[0] == "-":
+ pkg = pkg[1:]
+ if pkg[0] == "*": # Kill this so we can deal the "packages" file too
+ pkg = pkg[1:]
+ if not isvalidatom(pkg):
+ writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[x]))
+ del(pkgs[x])
+ return pkgs
+
+def grabints(myfilename):
+ newdict={}
+ try:
+ myfile=open(myfilename,"r")
+ except IOError:
+ return newdict
+ mylines=myfile.readlines()
+ myfile.close()
+ for x in mylines:
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ myline=string.split(x)
+ if len(myline)!=2:
+ continue
+ newdict[myline[0]]=string.atoi(myline[1])
+ return newdict
+
+def writeints(mydict,myfilename):
+ try:
+ myfile=open(myfilename,"w")
+ except IOError:
+ return 0
+ for x in mydict.keys():
+ myfile.write(x+" "+`mydict[x]`+"\n")
+ myfile.close()
+ return 1
+
+def writedict(mydict,myfilename,writekey=1):
+ """Writes out a dict to a file; writekey=0 mode doesn't write out
+ the key and assumes all values are strings, not lists."""
+ try:
+ myfile=open(myfilename,"w")
+ except IOError:
+ writemsg("Failed to open file for writedict(): "+str(myfilename)+"\n")
+ return 0
+ if not writekey:
+ for x in mydict.values():
+ myfile.write(x+"\n")
+ else:
+ for x in mydict.keys():
+ myfile.write(x+" ")
+ for y in mydict[x]:
+ myfile.write(y+" ")
+ myfile.write("\n")
+ myfile.close()
+ return 1
+
+def getconfig(mycfg,tolerant=0,allow_sourcing=False):
+ mykeys={}
+ try:
+ f=open(mycfg,'r')
+ except IOError:
+ return None
+ try:
+ lex=shlex.shlex(f)
+ lex.wordchars=string.digits+string.letters+"~!@#$%*_\:;?,./-+{}"
+ lex.quotes="\"'"
+ if allow_sourcing:
+ lex.source="source"
+ while 1:
+ key=lex.get_token()
+ if (key==''):
+ #normal end of file
+ break;
+ equ=lex.get_token()
+ if (equ==''):
+ #unexpected end of file
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n")
+ raise Exception("ParseError: Unexpected EOF: "+str(mycfg)+": on/before line "+str(lex.lineno))
+ else:
+ return mykeys
+ elif (equ!='='):
+ #invalid token
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Invalid token (not \"=\") "+str(equ)+"\n")
+ raise Exception("ParseError: Invalid token (not '='): "+str(mycfg)+": line "+str(lex.lineno))
+ else:
+ return mykeys
+ val=lex.get_token()
+ if (val==''):
+ #unexpected end of file
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n")
+ raise portage_exception.CorruptionError("ParseError: Unexpected EOF: "+str(mycfg)+": line "+str(lex.lineno))
+ else:
+ return mykeys
+ mykeys[key]=varexpand(val,mykeys)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ raise e.__class__, str(e)+" in "+mycfg
+ return mykeys
+
+#cache expansions of constant strings
+cexpand={}
+def varexpand(mystring,mydict={}):
+ try:
+ return cexpand[" "+mystring]
+ except KeyError:
+ pass
+ """
+ new variable expansion code. Removes quotes, handles \n, etc.
+ This code is used by the configfile code, as well as others (parser)
+ This would be a good bunch of code to port to C.
+ """
+ numvars=0
+ mystring=" "+mystring
+ #in single, double quotes
+ insing=0
+ indoub=0
+ pos=1
+ newstring=" "
+ while (pos<len(mystring)):
+ if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
+ if (indoub):
+ newstring=newstring+"'"
+ else:
+ insing=not insing
+ pos=pos+1
+ continue
+ elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
+ if (insing):
+ newstring=newstring+'"'
+ else:
+ indoub=not indoub
+ pos=pos+1
+ continue
+ if (not insing):
+ #expansion time
+ if (mystring[pos]=="\n"):
+ #convert newlines to spaces
+ newstring=newstring+" "
+ pos=pos+1
+ elif (mystring[pos]=="\\"):
+ #backslash expansion time
+ if (pos+1>=len(mystring)):
+ newstring=newstring+mystring[pos]
+ break
+ else:
+ a=mystring[pos+1]
+ pos=pos+2
+ if a=='a':
+ newstring=newstring+chr(007)
+ elif a=='b':
+ newstring=newstring+chr(010)
+ elif a=='e':
+ newstring=newstring+chr(033)
+ elif (a=='f') or (a=='n'):
+ newstring=newstring+chr(012)
+ elif a=='r':
+ newstring=newstring+chr(015)
+ elif a=='t':
+ newstring=newstring+chr(011)
+ elif a=='v':
+ newstring=newstring+chr(013)
+ elif a!='\n':
+ #remove backslash only, as bash does: this takes care of \\ and \' and \" as well
+ newstring=newstring+mystring[pos-1:pos]
+ continue
+ elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
+ pos=pos+1
+ if mystring[pos]=="{":
+ pos=pos+1
+ braced=True
+ else:
+ braced=False
+ myvstart=pos
+ validchars=string.ascii_letters+string.digits+"_"
+ while mystring[pos] in validchars:
+ if (pos+1)>=len(mystring):
+ if braced:
+ cexpand[mystring]=""
+ return ""
+ else:
+ pos=pos+1
+ break
+ pos=pos+1
+ myvarname=mystring[myvstart:pos]
+ if braced:
+ if mystring[pos]!="}":
+ cexpand[mystring]=""
+ return ""
+ else:
+ pos=pos+1
+ if len(myvarname)==0:
+ cexpand[mystring]=""
+ return ""
+ numvars=numvars+1
+ if mydict.has_key(myvarname):
+ newstring=newstring+mydict[myvarname]
+ else:
+ newstring=newstring+mystring[pos]
+ pos=pos+1
+ else:
+ newstring=newstring+mystring[pos]
+ pos=pos+1
+ if numvars==0:
+ cexpand[mystring]=newstring[1:]
+ return newstring[1:]
+
+def pickle_write(data,filename,debug=0):
+	import cPickle,os,portage_data
+ try:
+ myf=open(filename,"w")
+ cPickle.dump(data,myf,-1)
+ myf.flush()
+ myf.close()
+ writemsg("Wrote pickle: "+str(filename)+"\n",1)
+		os.chown(filename, os.getuid(), portage_data.portage_gid)
+		os.chmod(filename, 0664)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ return 0
+ return 1
+
+def pickle_read(filename,default=None,debug=0):
+ import cPickle,os
+ if not os.access(filename, os.R_OK):
+ writemsg("pickle_read(): File not readable. '"+filename+"'\n",1)
+ return default
+ data = None
+ try:
+ myf = open(filename)
+ mypickle = cPickle.Unpickler(myf)
+ mypickle.find_global = None
+ data = mypickle.load()
+ myf.close()
+ del mypickle,myf
+ writemsg("pickle_read(): Loaded pickle. '"+filename+"'\n",1)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! Failed to load pickle: "+str(e)+"\n",1)
+ data = default
+ return data
+
+class ReadOnlyConfig:
+ def __init__(self,filename,strict_keys=0):
+ self.__filename = filename[:]
+ self.__strict_keys = strict_keys
+ self.__mydict = {}
+ self.__dict_was_loaded = False
+ if os.path.isfile(self.__filename):
+ self.__mydict = getconfig(self.__filename)
+ self.__dict_was_loaded = True
+
+	def isLoaded(self):
+ return self.__dict_was_loaded
+
+ def __getitem__(self,key):
+ if self.__mydict.has_key(key):
+ return self.__mydict[key][:]
+ if self.__strict_keys:
+ raise KeyError("%s not found in config: '%s'" % (key,self.__filename))
+ return ""
+
+ def __setitem__(self,key,value):
+ raise KeyError("This class is not modifiable.")
+
+ def keys(self):
+ return self.__mydict.keys()
+
+ def has_key(self,key):
+ return self.__mydict.has_key(key)
+
+def unique_array(array):
+ """Takes an array and makes sure each element is unique."""
+ mya = []
+ for x in array:
+ if x not in mya:
+ mya.append(x)
+ return mya
+
+
+
+
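A short sketch of the expansion helpers (hedged: the /tmp path is
hypothetical; getconfig() parses make.conf-style VAR="value" files and
expands references against the keys read so far):

    import portage_util
    print portage_util.varexpand("${ROOT}tmp", {"ROOT": "/"})  # -> /tmp
    open("/tmp/demo.conf", "w").write('ROOT="/"\nPORTDIR="${ROOT}usr/portage"\n')
    print portage_util.getconfig("/tmp/demo.conf")
    # -> {'ROOT': '/', 'PORTDIR': '/usr/portage'} (key order may vary)
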
diff --git a/pym/xpak.py b/pym/xpak.py
new file mode 100644
index 000000000..b6a649420
--- /dev/null
+++ b/pym/xpak.py
@@ -0,0 +1,384 @@
+# Copyright 2001-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-src/portage/pym/xpak.py,v 1.13.2.3 2005/02/26 11:22:38 carpaski Exp $
+cvs_id_string="$Id: xpak.py,v 1.13.2.3 2005/02/26 11:22:38 carpaski Exp $"[5:-2]
+
+# The format for a tbz2/xpak:
+#
+# tbz2: tar.bz2 + xpak + (xpak_offset) + "STOP"
+# xpak: "XPAKPACK" + (index_len) + (data_len) + index + data + "XPAKSTOP"
+# index: (pathname_len) + pathname + (data_offset) + (data_len)
+# index entries are concatenated end-to-end.
+# data: concatenated data chunks, end-to-end.
+#
+# [tarball]XPAKPACKIIIIDDDD[index][data]XPAKSTOPOOOOSTOP
+#
+# (integer) == encodeint(integer) ===> 4 characters (big-endian copy)
+# '+' means concatenate the fields ===> All chunks are strings
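+#
+# Worked example: packing a single file "CAT" whose data is "meow" yields
+#   index = encodeint(3)+"CAT"+encodeint(0)+encodeint(4)   (15 bytes)
+#   data  = "meow"                                         (4 bytes)
+# so the whole segment is:
+#   "XPAKPACK"+encodeint(15)+encodeint(4)+index+"meow"+"XPAKSTOP"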
+
+import sys,os,string
+from stat import *
+
+def addtolist(mylist,curdir):
+	"""(list, curdir) --- Walks the tree below the current working directory
+	and appends each file path, prefixed with curdir, to mylist.
+	Returns nothing; mylist is modified in place."""
+ for x in os.listdir("."):
+ if os.path.isdir(x):
+ os.chdir(x)
+ addtolist(mylist,curdir+x+"/")
+ os.chdir("..")
+ else:
+ if curdir+x not in mylist:
+ mylist.append(curdir+x)
+
+def encodeint(myint):
+ """Takes a 4 byte integer and converts it into a string of 4 characters.
+ Returns the characters in a string."""
+ part1=chr((myint >> 24 ) & 0x000000ff)
+ part2=chr((myint >> 16 ) & 0x000000ff)
+ part3=chr((myint >> 8 ) & 0x000000ff)
+ part4=chr(myint & 0x000000ff)
+ return part1+part2+part3+part4
+
+def decodeint(mystring):
+ """Takes a 4 byte string and converts it into a 4 byte integer.
+ Returns an integer."""
+ myint=0
+ myint=myint+ord(mystring[3])
+ myint=myint+(ord(mystring[2]) << 8)
+ myint=myint+(ord(mystring[1]) << 16)
+ myint=myint+(ord(mystring[0]) << 24)
+ return myint
+
+def xpak(rootdir,outfile=None):
+ """(rootdir,outfile) -- creates an xpak segment of the directory 'rootdir'
+ and under the name 'outfile' if it is specified. Otherwise it returns the
+ xpak segment."""
+ try:
+ origdir=os.getcwd()
+ except SystemExit, e:
+ raise
+ except:
+ os.chdir("/")
+ origdir="/"
+ os.chdir(rootdir)
+ mylist=[]
+
+ addtolist(mylist,"")
+ mylist.sort()
+
+ #Our list index has been created
+
+ indexglob=""
+ indexpos=0
+ dataglob=""
+ datapos=0
+ for x in mylist:
+ a=open(x,"r")
+ newglob=a.read()
+ a.close()
+ mydatasize=len(newglob)
+ indexglob=indexglob+encodeint(len(x))+x+encodeint(datapos)+encodeint(mydatasize)
+ indexpos=indexpos+4+len(x)+4+4
+ dataglob=dataglob+newglob
+ datapos=datapos+mydatasize
+ os.chdir(origdir)
+ if outfile:
+ outf=open(outfile,"w")
+ outf.write("XPAKPACK"+encodeint(len(indexglob))+encodeint(len(dataglob)))
+ outf.write(indexglob)
+ outf.write(dataglob)
+ outf.write("XPAKSTOP")
+ outf.close()
+ else:
+ myret="XPAKPACK"+encodeint(len(indexglob))+encodeint(len(dataglob))
+ myret=myret+indexglob+dataglob+"XPAKSTOP"
+ return myret
+
+def xsplit(infile):
+ """(infile) -- Splits the infile into two files.
+ 'infile.index' contains the index segment.
+	'infile.dat' contains the data segment."""
+ myfile=open(infile,"r")
+ mydat=myfile.read()
+ myfile.close()
+
+ splits = xsplit_mem(mydat)
+ if not splits:
+ return
+
+ myfile=open(infile+".index","w")
+ myfile.write(splits[0])
+ myfile.close()
+ myfile=open(infile+".dat","w")
+ myfile.write(splits[1])
+ myfile.close()
+ return
+
+def xsplit_mem(mydat):
+ if mydat[0:8]!="XPAKPACK":
+ return None
+ if mydat[-8:]!="XPAKSTOP":
+ return None
+ indexsize=decodeint(mydat[8:12])
+ datasize=decodeint(mydat[12:16])
+ return (mydat[16:indexsize+16], mydat[indexsize+16:-8])
+
+def getindex(infile):
+ """(infile) -- grabs the index segment from the infile and returns it."""
+ myfile=open(infile,"r")
+ myheader=myfile.read(16)
+ if myheader[0:8]!="XPAKPACK":
+ myfile.close()
+ return
+ indexsize=decodeint(myheader[8:12])
+ myindex=myfile.read(indexsize)
+ myfile.close()
+ return myindex
+
+def getboth(infile):
+ """(infile) -- grabs the index and data segments from the infile.
+ Returns an array [indexSegment,dataSegment]"""
+ myfile=open(infile,"r")
+ myheader=myfile.read(16)
+ if myheader[0:8]!="XPAKPACK":
+ myfile.close()
+ return
+ indexsize=decodeint(myheader[8:12])
+ datasize=decodeint(myheader[12:16])
+ myindex=myfile.read(indexsize)
+ mydata=myfile.read(datasize)
+ myfile.close()
+ return [myindex,mydata]
+
+def listindex(myindex):
+ """Print to the terminal the filenames listed in the indexglob passed in."""
+ for x in getindex_mem(myindex):
+ print x
+
+def getindex_mem(myindex):
+ """Returns the filenames listed in the indexglob passed in."""
+ myindexlen=len(myindex)
+ startpos=0
+ myret=[]
+ while ((startpos+8)<myindexlen):
+ mytestlen=decodeint(myindex[startpos:startpos+4])
+ myret=myret+[myindex[startpos+4:startpos+4+mytestlen]]
+ startpos=startpos+mytestlen+12
+ return myret
+
+def searchindex(myindex,myitem):
+ """(index,item) -- Finds the offset and length of the file 'item' in the
+ datasegment via the index 'index' provided."""
+ mylen=len(myitem)
+ myindexlen=len(myindex)
+ startpos=0
+ while ((startpos+8)<myindexlen):
+ mytestlen=decodeint(myindex[startpos:startpos+4])
+ if mytestlen==mylen:
+ if myitem==myindex[startpos+4:startpos+4+mytestlen]:
+ #found
+ datapos=decodeint(myindex[startpos+4+mytestlen:startpos+8+mytestlen]);
+ datalen=decodeint(myindex[startpos+8+mytestlen:startpos+12+mytestlen]);
+ return [datapos,datalen]
+ startpos=startpos+mytestlen+12
+
+def getitem(myid,myitem):
+ myindex=myid[0]
+ mydata=myid[1]
+ myloc=searchindex(myindex,myitem)
+ if not myloc:
+ return None
+ return mydata[myloc[0]:myloc[0]+myloc[1]]
+
+def xpand(myid,mydest):
+ myindex=myid[0]
+ mydata=myid[1]
+ try:
+ origdir=os.getcwd()
+ except SystemExit, e:
+ raise
+ except:
+ os.chdir("/")
+ origdir="/"
+ os.chdir(mydest)
+ myindexlen=len(myindex)
+ startpos=0
+ while ((startpos+8)<myindexlen):
+ namelen=decodeint(myindex[startpos:startpos+4])
+ datapos=decodeint(myindex[startpos+4+namelen:startpos+8+namelen]);
+ datalen=decodeint(myindex[startpos+8+namelen:startpos+12+namelen]);
+ myname=myindex[startpos+4:startpos+4+namelen]
+ dirname=os.path.dirname(myname)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat=open(myname,"w")
+ mydat.write(mydata[datapos:datapos+datalen])
+ mydat.close()
+ startpos=startpos+namelen+12
+ os.chdir(origdir)
+
+class tbz2:
+ def __init__(self,myfile):
+ self.file=myfile
+ self.filestat=None
+ self.index=""
+ self.infosize=0
+ self.xpaksize=0
+ self.indexsize=None
+ self.datasize=None
+ self.indexpos=None
+ self.datapos=None
+ self.scan()
+
+ def decompose(self,datadir,cleanup=1):
+ """Alias for unpackinfo() --- Complement to recompose() but optionally
+ deletes the destination directory. Extracts the xpak from the tbz2 into
+ the directory provided. Raises IOError if scan() fails.
+		Returns result of unpackinfo()."""
+ if not self.scan():
+ raise IOError
+ if cleanup and os.path.exists(datadir):
+ # XXX: Potentially bad
+ os.system("rm -Rf "+datadir+"/*")
+ if not os.path.exists(datadir):
+ os.makedirs(datadir)
+ return self.unpackinfo(datadir)
+ def compose(self,datadir,cleanup=0):
+ """Alias for recompose()."""
+		return self.recompose(datadir,cleanup)
+ def recompose(self,datadir,cleanup=0):
+ """Creates an xpak segment from the datadir provided, truncates the tbz2
+ to the end of regular data if an xpak segment already exists, and adds
+ the new segment to the file with terminating info."""
+ self.scan() # Don't care about condition... We'll rewrite the data anyway.
+ myfile=open(self.file,"a+")
+ if not myfile:
+ raise IOError
+ myfile.seek(-self.xpaksize,2) # 0,2 or -0,2 just mean EOF.
+ myfile.truncate()
+ xpdata=xpak(datadir)
+ myfile.write(xpdata+encodeint(len(xpdata))+"STOP")
+ myfile.flush()
+ myfile.close()
+ if cleanup:
+ # XXX: Potentially bad
+ os.system("rm -Rf "+datadir)
+ return 1
+
+ def scan(self):
+ """Scans the tbz2 to locate the xpak segment and setup internal values.
+ This function is called by relevant functions already."""
+ try:
+ mystat=os.stat(self.file)
+ if self.filestat:
+ changed=0
+ for x in [ST_SIZE, ST_MTIME, ST_CTIME]:
+ if mystat[x] != self.filestat[x]:
+ changed=1
+ if not changed:
+ return 1
+ self.filestat=mystat
+ a=open(self.file,"r")
+ a.seek(-16,2)
+ trailer=a.read()
+ self.infosize=0
+ self.xpaksize=0
+ if trailer[-4:]!="STOP":
+ a.close()
+ return 0
+ if trailer[0:8]!="XPAKSTOP":
+ a.close()
+ return 0
+ self.infosize=decodeint(trailer[8:12])
+ self.xpaksize=self.infosize+8
+ a.seek(-(self.xpaksize),2)
+ header=a.read(16)
+ if header[0:8]!="XPAKPACK":
+ a.close()
+ return 0
+ self.indexsize=decodeint(header[8:12])
+ self.datasize=decodeint(header[12:16])
+ self.indexpos=a.tell()
+ self.index=a.read(self.indexsize)
+ self.datapos=a.tell()
+ a.close()
+ return 2
+ except SystemExit, e:
+ raise
+ except:
+ return 0
+
+ def filelist(self):
+ """Return an array of each file listed in the index."""
+ if not self.scan():
+ return None
+ return getindex_mem(self.index)
+
+ def getfile(self,myfile,mydefault=None):
+ """Finds 'myfile' in the data segment and returns it."""
+ if not self.scan():
+ return None
+ myresult=searchindex(self.index,myfile)
+ if not myresult:
+ return mydefault
+ a=open(self.file,"r")
+ a.seek(self.datapos+myresult[0],0)
+ myreturn=a.read(myresult[1])
+ a.close()
+ return myreturn
+
+ def getelements(self,myfile):
+ """A split/array representation of tbz2.getfile()"""
+ mydat=self.getfile(myfile)
+ if not mydat:
+ return []
+ return string.split(mydat)
+
+ def unpackinfo(self,mydest):
+ """Unpacks all the files from the dataSegment into 'mydest'."""
+ if not self.scan():
+ return 0
+ try:
+ origdir=os.getcwd()
+ except SystemExit, e:
+ raise
+ except:
+ os.chdir("/")
+ origdir="/"
+ a=open(self.file,"r")
+ if not os.path.exists(mydest):
+ os.makedirs(mydest)
+ os.chdir(mydest)
+ startpos=0
+ while ((startpos+8)<self.indexsize):
+ namelen=decodeint(self.index[startpos:startpos+4])
+ datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen]);
+ datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen]);
+ myname=self.index[startpos+4:startpos+4+namelen]
+ dirname=os.path.dirname(myname)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat=open(myname,"w")
+ a.seek(self.datapos+datapos)
+ mydat.write(a.read(datalen))
+ mydat.close()
+ startpos=startpos+namelen+12
+ a.close()
+ os.chdir(origdir)
+ return 1
+
+ def getboth(self):
+ """Returns an array [indexSegment,dataSegment]"""
+ if not self.scan():
+ return None
+
+ a = open(self.file,"r")
+ a.seek(self.datapos)
+		mydata = a.read(self.datasize)
+ a.close()
+
+ return [self.index[:],mydata]
+
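
A roundtrip sketch for the helpers above (hedged: the directory, archive, and
entry names are hypothetical):

    import xpak
    seg = xpak.xpak("/tmp/xpak-src")           # pack a directory in memory
    index, data = xpak.xsplit_mem(seg)         # split into segments
    print xpak.getindex_mem(index)             # names of the packed files
    print xpak.getitem([index, data], "USE")   # contents of one entry (or None)
    t = xpak.tbz2("/tmp/demo.tbz2")            # wrap an existing tbz2
    t.recompose("/tmp/xpak-src")               # replace its xpak segment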