obj_properties now uses sets for paths.

revdep-rebuild-reimplementation.git / pym/portage/dbapi/vartree.py

# Copyright 1998-2007 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id: vartree.py 11392 2008-08-10 10:30:20Z zmedico $

__all__ = ["PreservedLibsRegistry", "LinkageMap",
	"vardbapi", "vartree", "dblink"] + \
	["write_contents", "tar_contents"]

from portage.checksum import perform_md5
from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, PORTAGE_BIN_PATH, \
	PRIVATE_PATH, VDB_PATH
from portage.data import portage_gid, portage_uid, secpass
from portage.dbapi import dbapi
from portage.dep import dep_getslot, use_reduce, paren_reduce, isvalidatom, \
	isjustname, dep_getkey, match_from_list
from portage.exception import InvalidAtom, InvalidData, InvalidPackageName, \
	FileNotFound, PermissionDenied, UnsupportedAPIException
from portage.locks import lockdir, unlockdir
from portage.output import bold, red, green
from portage.update import fixdbentries
from portage.util import apply_secpass_permissions, ConfigProtect, ensure_dirs, \
	writemsg, writemsg_stdout, writemsg_level, \
	write_atomic, atomic_ofstream, writedict, \
	grabfile, grabdict, normalize_path, new_protect_filename, getlibpaths
from portage.versions import pkgsplit, catpkgsplit, catsplit, best, pkgcmp

from portage import listdir, dep_expand, flatten, key_expand, \
	doebuild_environment, doebuild, env_update, prepare_build_dirs, \
	abssymlink, movefile, _movefile, bsd_chflags, cpv_getkey

from portage.elog import elog_process
from portage.elog.filtering import filter_mergephases, filter_unmergephases

import os, re, sys, stat, errno, commands, copy, time, subprocess
import logging
import shlex
from itertools import izip

try:
	import cPickle
except ImportError:
	import pickle as cPickle

class PreservedLibsRegistry(object):
	""" This class handles the tracking of preserved library objects """
	def __init__(self, filename, autocommit=True):
		""" @param filename: absolute path for saving the preserved libs records
			@type filename: String
			@param autocommit: determines if the file is written after every update
			@type autocommit: Boolean
		"""
		self._filename = filename
		self._autocommit = autocommit
		self.load()
		self.pruneNonExisting()

	def load(self):
		""" Reload the registry data from file """
		try:
			self._data = cPickle.load(open(self._filename, "r"))
		except (EOFError, IOError), e:
			if isinstance(e, EOFError) or e.errno == errno.ENOENT:
				self._data = {}
			elif e.errno == PermissionDenied.errno:
				raise PermissionDenied(self._filename)
			else:
				raise e

	def store(self):
		""" Store the registry data to file. No need to call this if autocommit
			was enabled.
		"""
		if os.environ.get("SANDBOX_ON") == "1":
			return
		try:
			f = atomic_ofstream(self._filename)
			cPickle.dump(self._data, f)
			f.close()
		except EnvironmentError, e:
			if e.errno != PermissionDenied.errno:
				writemsg("!!! %s %s\n" % (e, self._filename), noiselevel=-1)

	def register(self, cpv, slot, counter, paths):
		""" Register new objects in the registry. If there is a record with the
			same packagename (internally derived from cpv) and slot it is
			overwritten with the new data.
			@param cpv: package instance that owns the objects
			@type cpv: CPV (as String)
			@param slot: the value of SLOT of the given package instance
			@type slot: String
			@param counter: vdb counter value for the package instance
			@type counter: Integer
			@param paths: absolute paths of objects that got preserved during an update
			@type paths: List
		"""
		cp = "/".join(catpkgsplit(cpv)[:2])
		cps = cp+":"+slot
		if len(paths) == 0 and cps in self._data \
				and self._data[cps][0] == cpv and int(self._data[cps][1]) == int(counter):
			del self._data[cps]
		elif len(paths) > 0:
			self._data[cps] = (cpv, counter, paths)
		if self._autocommit:
			self.store()

	def unregister(self, cpv, slot, counter):
		""" Remove a previous registration of preserved objects for the given package.
			@param cpv: package instance whose records should be removed
			@type cpv: CPV (as String)
			@param slot: the value of SLOT of the given package instance
			@type slot: String
		"""
		self.register(cpv, slot, counter, [])
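
	# Illustrative usage sketch (all values invented): preserving one library
	# object during an upgrade of dev-libs/foo, then dropping the record:
	#   registry = PreservedLibsRegistry("/var/lib/portage/preserved_libs_registry")
	#   registry.register("dev-libs/foo-1.2", "0", 42, ["/usr/lib/libfoo.so.1"])
	#   registry.unregister("dev-libs/foo-1.2", "0", 42)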

	def pruneNonExisting(self):
		""" Remove all records for objects that no longer exist on the filesystem. """
		for cps in self._data.keys():
			cpv, counter, paths = self._data[cps]
			paths = [f for f in paths if os.path.exists(f)]
			if len(paths) > 0:
				self._data[cps] = (cpv, counter, paths)
			else:
				del self._data[cps]
		if self._autocommit:
			self.store()

	def hasEntries(self):
		""" Check if this registry contains any records. """
		return len(self._data) > 0

	def getPreservedLibs(self):
		""" Return a mapping of packages->preserved objects.
			@returns mapping of package instances to preserved objects
			@rtype Dict cpv->list-of-paths
		"""
		rValue = {}
		for cps in self._data:
			rValue[self._data[cps][0]] = self._data[cps][2]
		return rValue
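
	# Example return value of getPreservedLibs() (illustrative):
	#   {'dev-libs/foo-1.2': ['/usr/lib/libfoo.so.1']}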

class LinkageMap(object):
	def __init__(self, vardbapi):
		self._dbapi = vardbapi
		self._libs = {}
		self._obj_properties = {}
		self._defpath = set(getlibpaths())
		self._obj_key_cache = {}

	def rebuild(self, include_file=None):
		libs = {}
		obj_key_cache = {}
		obj_properties = {}
		lines = []
		for cpv in self._dbapi.cpv_all():
			lines += self._dbapi.aux_get(cpv, ["NEEDED.ELF.2"])[0].split('\n')
		# Cache NEEDED.* files to avoid doing excessive IO for every rebuild.
		self._dbapi.flush_cache()

		if include_file:
			lines += grabfile(include_file)

		# have to call scanelf for preserved libs here as they aren't
		# registered in NEEDED.ELF.2 files
		if self._dbapi.plib_registry and self._dbapi.plib_registry.getPreservedLibs():
			args = ["/usr/bin/scanelf", "-qF", "%a;%F;%S;%r;%n"]
			for items in self._dbapi.plib_registry.getPreservedLibs().values():
				args += [x.lstrip(".") for x in items]
			proc = subprocess.Popen(args, stdout=subprocess.PIPE)
			output = [l[3:] for l in proc.communicate()[0].split("\n")]
			lines += output
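
		# Each entry is a semicolon-separated record matching the scanelf
		# format string above, "arch;obj;soname;rpath;needed", e.g.
		# (invented values):
		#   X86_64;/usr/bin/bar;;/usr/lib64:${ORIGIN}/../lib64;libfoo.so.1,libc.so.6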

		for l in lines:
			if l.strip() == "":
				continue
			fields = l.strip("\n").split(";")
			if len(fields) < 5:
				print "Error", fields
				# insufficient field length
				continue
			arch = fields[0]
			obj = fields[1]
			obj_key = self._generateObjKey(obj)
			soname = fields[2]
			path = set([normalize_path(x)
				for x in filter(None, fields[3].replace(
				"${ORIGIN}", os.path.dirname(obj)).replace(
				"$ORIGIN", os.path.dirname(obj)).split(":"))])
			needed = filter(None, fields[4].split(","))
			if soname:
				libs.setdefault(soname, \
					{arch: {"providers": set(), "consumers": set()}})
				libs[soname].setdefault(arch, \
					{"providers": set(), "consumers": set()})
				libs[soname][arch]["providers"].add(obj_key)
			for x in needed:
				libs.setdefault(x, \
					{arch: {"providers": set(), "consumers": set()}})
				libs[x].setdefault(arch, {"providers": set(), "consumers": set()})
				libs[x][arch]["consumers"].add(obj_key)
			obj_key_cache.setdefault(obj, obj_key)
			# All object paths are added into the obj_properties tuple
			obj_properties.setdefault(obj_key, \
				(arch, needed, path, soname, set()))[4].add(obj)

		self._libs = libs
		self._obj_properties = obj_properties
		self._obj_key_cache = obj_key_cache

	def _generateObjKey(self, obj):
		"""
		Generate obj key for a given object.

		@param obj: path to an existing file
		@type obj: string (example: '/usr/bin/bar')
		@rtype: 2-tuple of longs or string
		@return: If obj exists, a 2-tuple of obj's inode and device from a stat
			call is returned. Otherwise, the obj is returned.

		"""
		try:
			return os.stat(obj)[1:3]
		except OSError:
			# XXX pull this out
			# from portage.output import teal
			# writemsg(bold(red("Error in ")) + \
			#	bold(teal("_generateObjKey. Stat failed on %s" % obj)) + '\n')
			return obj
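
	# Example keys (invented values): an existing '/usr/bin/bar' yields its
	# (st_ino, st_dev) pair, e.g. (1234567L, 2049L); a vanished path falls
	# back to the string '/usr/bin/bar' itself.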

	def listBrokenBinaries(self):
		"""
		Find binaries and their needed sonames, which have no providers.

		@rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
		@return: The return value is an object -> set-of-sonames mapping, where
			object is a broken binary and the set consists of sonames needed by
			object that have no corresponding libraries to fulfill the dependency.

		"""
		class LibraryCache(object):

			"""
			Caches sonames and realpaths associated with paths.

			The purpose of this class is to prevent multiple calls of
			os.path.realpath and os.path.isfile on the same paths.

			"""

			def __init__(cache_self):
				cache_self.cache = {}

			def get(cache_self, obj):
				"""
				Caches and returns properties associated with an object.

				@param obj: absolute path (can be symlink)
				@type obj: string (example: '/usr/lib/libfoo.so')
				@rtype: 4-tuple with types
					(string or None, string or None, 2-tuple, Boolean)
				@return: 4-tuple with the following components:
					1. arch as a string or None if it does not exist,
					2. soname as a string or None if it does not exist,
					3. obj_key as 2-tuple,
					4. Boolean representing whether the object exists.
					(example: ('X86_64', 'libfoo.so.1', (123L, 456L), True))

				"""
				if obj in cache_self.cache:
					return cache_self.cache[obj]
				else:
					if obj in self._obj_key_cache:
						obj_key = self._obj_key_cache.get(obj)
					else:
						obj_key = self._generateObjKey(obj)
					# Check that the library exists on the filesystem.
					if isinstance(obj_key, tuple):
						# Get the arch and soname from LinkageMap._obj_properties if
						# it exists. Otherwise, None.
						arch, _, _, soname, _ = \
							self._obj_properties.get(obj_key, (None,)*5)
						return cache_self.cache.setdefault(obj, \
							(arch, soname, obj_key, True))
					else:
						return cache_self.cache.setdefault(obj, \
							(None, None, obj_key, False))

		debug = False
		rValue = {}
		cache = LibraryCache()
		providers = self.listProviders()

		# Iterate over all obj_keys and their providers.
		for obj_key, sonames in providers.items():
			arch, _, path, _, objs = self._obj_properties[obj_key]
			path = path.union(self._defpath)
			# Iterate over each needed soname and the set of library paths that
			# fulfill the soname to determine if the dependency is broken.
			for soname, libraries in sonames.items():
				# validLibraries is used to store libraries, which satisfy soname,
				# so if no valid libraries are found, the soname is not satisfied
				# for obj_key.  If unsatisfied, objects associated with obj_key
				# must be emerged.
				validLibraries = set()
				# It could be the case that the library to satisfy the soname is
				# not in the obj's runpath, but a symlink to the library is (eg
				# libnvidia-tls.so.1 in nvidia-drivers).  Also, since LinkageMap
				# does not catalog symlinks, broken or missing symlinks may go
				# unnoticed.  As a result of these cases, check that a file with
				# the same name as the soname exists in obj's runpath.
				# XXX If we catalog symlinks in LinkageMap, this could be improved.
				for directory in path:
					cachedArch, cachedSoname, cachedKey, cachedExists = \
						cache.get(os.path.join(directory, soname))
					# Check that this library provides the needed soname.  Doing
					# this, however, will cause consumers of libraries missing
					# sonames to be unnecessarily emerged. (eg libmix.so)
					if cachedSoname == soname and cachedArch == arch:
						validLibraries.add(cachedKey)
						if debug and cachedKey not in \
								map(self._obj_key_cache.get, libraries):
							print "Found provider outside of findProviders:", \
								os.path.join(directory, soname), "->", \
								cachedKey
						# A valid library has been found, so there is no need to
						# continue.
						break
					if debug and cachedKey in self._obj_properties:
						print "Broken symlink or missing/bad soname:", \
							os.path.join(directory, soname), '->', \
							cachedKey, "with soname", cachedSoname, \
							"but expecting", soname
				# This conditional checks if there are no libraries to satisfy the
				# soname (empty set).
				if not validLibraries:
					for obj in objs:
						rValue.setdefault(obj, set()).add(soname)
					# If no valid libraries have been found by this point, then
					# there are no files named with the soname within obj's runpath,
					# but if there are libraries (from the providers mapping), it is
					# likely that symlinks or the actual libraries are missing.
					# Thus possible symlinks and missing libraries are added to
					# rValue in order to emerge corrupt library packages.
					for lib in libraries:
						cachedArch, cachedSoname, cachedKey, cachedExists = \
							cache.get(lib)
						if not cachedExists:
							# The library's package needs to be emerged to repair the
							# missing library.
							rValue.setdefault(lib, set()).add(soname)
						else:
							# A library providing the soname exists in the obj's
							# runpath, but no file named as the soname exists, so add
							# the path constructed from the lib's directory and the
							# soname to rValue to fix cases of vanishing (or modified)
							# symlinks.  This path is not guaranteed to exist, but it
							# follows the symlink convention found in the majority of
							# packages.
							# XXX merge this into block above
							rValue.setdefault(os.path.join(os.path.dirname(lib), \
								soname), set()).add(soname)
						if debug:
							if not os.path.isfile(lib):
								print "Missing library:", lib
							else:
								print "Possibly missing symlink:", \
									os.path.join(os.path.dirname(lib), soname)

		return rValue

	def listConsumers(self):
		rValue = {}
		if not self._libs:
			self.rebuild()
		# Iterate over all objects within LinkageMap.
		for obj_key in self._obj_properties:
			rValue.setdefault(obj_key, self.findConsumers(obj_key=obj_key))
		return rValue

	def listProviders(self):
		"""
		Find the providers for all objects in LinkageMap.

		@rtype: dict (example:
			{(123L, 456L): {'libbar.so': set(['/lib/libbar.so.1.5'])}})
		@return: The return value is an object key -> providers mapping, where
			providers is a mapping of soname -> set-of-library-paths returned
			from the findProviders method.

		"""
		rValue = {}
		if not self._libs:
			self.rebuild()
		# Iterate over all objects within LinkageMap.
		for obj_key in self._obj_properties:
			# XXX remove this
			if len(self._obj_properties[obj_key][4]) != 1:
				writemsg(bold(red(self._obj_properties[obj_key])))
			rValue.setdefault(obj_key, self.findProviders(obj_key=obj_key))
		return rValue

	def isMasterLink(self, obj):
		basename = os.path.basename(obj)
		obj_key = self._generateObjKey(obj)
		if obj_key not in self._obj_properties:
			raise KeyError("%s (%s) not in object list" % (obj_key, obj))
		soname = self._obj_properties[obj_key][3]
		return (len(basename) < len(soname))
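
	# Heuristic example (illustrative names): '/usr/lib/libfoo.so' whose
	# soname is 'libfoo.so.1' satisfies len('libfoo.so') < len('libfoo.so.1'),
	# so it is treated as the master link.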

	def listLibraryObjects(self):
		rValue = []
		if not self._libs:
			self.rebuild()
		for soname in self._libs:
			for arch in self._libs[soname]:
				for obj_key in self._libs[soname][arch]["providers"]:
					rValue.extend(self._obj_properties[obj_key][4])
		return rValue

	def getSoname(self, obj):
		if not self._libs:
			self.rebuild()
		if obj not in self._obj_key_cache:
			raise KeyError("%s not in object list" % obj)
		return self._obj_properties[self._obj_key_cache[obj]][3]

	def findProviders(self, obj=None, obj_key=None):
		"""
		Find providers for an object or object key.

		This method should be called with either an obj or obj_key.  If called
		with both, the obj_key is ignored.  If called with neither, KeyError is
		raised as if an invalid obj was passed.

		@param obj:
		@type obj:
		@param obj_key:
		@type obj_key:
		@rtype:
		@return:

		"""
		rValue = {}

		if not self._libs:
			self.rebuild()

		if obj is not None:
			obj_key = self._obj_key_cache.get(obj)
			if obj_key not in self._obj_properties:
				obj_key = self._generateObjKey(obj)
				if obj_key not in self._obj_properties:
					raise KeyError("%s (%s) not in object list" % (obj_key, obj))
		elif obj_key not in self._obj_properties:
			raise KeyError("%s not in object list" % obj_key)

		arch, needed, path, soname, objs = self._obj_properties[obj_key]
		path = path.union(self._defpath)
		# XXX test this
		# path = set(realpath(x) for x in path)
		for x in needed:
			rValue[x] = set()
			if x not in self._libs or arch not in self._libs[x]:
				continue
			for y in self._libs[x][arch]["providers"]:
				objs = self._obj_properties[y][4]
				# XXX x is an soname, so it should never start with os.sep, right?
				#if x[0] == os.sep and realpath(x) == realpath(y):
				#	rValue[x].add(y)
				for o in objs:
					if os.path.dirname(o) in path:
						rValue[x].add(o)

		return rValue
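
	# Illustrative call (invented paths): findProviders(obj='/usr/bin/bar')
	# might return {'libfoo.so.1': set(['/usr/lib64/libfoo.so.1']),
	# 'libc.so.6': set(['/lib64/libc.so.6'])} when those directories appear
	# in bar's runpath or in the default library path.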

	def findConsumers(self, obj=None, obj_key=None):
		"""
		Find consumers of an object or object key.

		This method should be called with either an obj or obj_key.  If called
		with both, the obj_key is ignored.  If called with neither, KeyError is
		raised as if an invalid obj was passed.

		@param obj:
		@type obj:
		@param obj_key:
		@type obj_key:
		@rtype:
		@return:

		"""
		rValue = set()

		if not self._libs:
			self.rebuild()

		if obj is not None:
			objs = set([obj])
			obj_key = self._obj_key_cache.get(obj)
			if obj_key not in self._obj_properties:
				obj_key = self._generateObjKey(obj)
				if obj_key not in self._obj_properties:
					raise KeyError("%s (%s) not in object list" % (obj_key, obj))
		else:
			if obj_key not in self._obj_properties:
				raise KeyError("%s not in object list" % obj_key)
			objs = self._obj_properties[obj_key][4]

		objs_dirs = [os.path.dirname(x) for x in objs]

		# If there is another version of this lib with the
		# same soname and the master link points to that
		# other version, this lib will be shadowed and won't
		# have any consumers.
		# Only necessary if given obj, which may be a symlink.
		if obj is not None:
			soname = self._obj_properties[obj_key][3]
			obj_dir = os.path.dirname(obj)
			master_link = os.path.join(obj_dir, soname)
			try:
				master_st = os.stat(master_link)
				obj_st = os.stat(obj)
			except OSError:
				pass
			else:
				if (obj_st.st_dev, obj_st.st_ino) != \
					(master_st.st_dev, master_st.st_ino):
					return set()

		for soname in self._libs:
			for arch in self._libs[soname]:
				if obj_key in self._libs[soname][arch]["providers"]:
					for consumer_key in self._libs[soname][arch]["consumers"]:
						_, _, path, _, consumer_objs = \
							self._obj_properties[consumer_key]
						# XXX test this
						#path = [realpath(y) for y in path+self._defpath]
						path = path.union(self._defpath)
						# XXX x is an soname, so it should never start with os.sep,
						# right?
						#if soname[0] == os.sep and realpath(soname) == realpath(obj):
						#	rValue.add(x)
						#if realpath(obj_dir) in path:
						#	rValue.add(x)
						for consumer_obj in consumer_objs:
							for directory in objs_dirs:
								if directory in path:
									rValue.add(consumer_obj)
		return rValue
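
	# Illustrative call (invented paths): findConsumers(obj='/usr/lib64/libfoo.so.1')
	# might return set(['/usr/bin/bar']) if bar lists libfoo.so.1 among its
	# NEEDED sonames and /usr/lib64 is in bar's library search path.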

class vardbapi(dbapi):

	_excluded_dirs = ["CVS", "lost+found"]
	_excluded_dirs = [re.escape(x) for x in _excluded_dirs]
	_excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
		"|".join(_excluded_dirs) + r')$')

	_aux_cache_version = "1"
	_owners_cache_version = "1"

	# Number of uncached packages to trigger cache update, since
	# it's wasteful to update it for every vdb change.
	_aux_cache_threshold = 5

	_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
	_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')

	def __init__(self, root, categories=None, settings=None, vartree=None):
		"""
		The categories parameter is unused since the dbapi class
		now has a categories property that is generated from the
		available packages.
		"""
		self.root = root[:]

		#cache for category directory mtimes
		self.mtdircache = {}

		#cache for dependency checks
		self.matchcache = {}

		#cache for cp_list results
		self.cpcache = {}

		self.blockers = None
		if settings is None:
			from portage import settings
		self.settings = settings
		if vartree is None:
			from portage import db
			vartree = db[root]["vartree"]
		self.vartree = vartree
		self._aux_cache_keys = set(
			["CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
			"EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
			"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
			"repository", "RESTRICT", "SLOT", "USE"])
		self._aux_cache_obj = None
		self._aux_cache_filename = os.path.join(self.root,
			CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
		self._counter_path = os.path.join(root,
			CACHE_PATH.lstrip(os.path.sep), "counter")

		try:
			self.plib_registry = PreservedLibsRegistry(
				os.path.join(self.root, PRIVATE_PATH, "preserved_libs_registry"))
		except PermissionDenied:
			# apparently this user isn't allowed to access PRIVATE_PATH
			self.plib_registry = None

		self.linkmap = LinkageMap(self)
		self._owners = self._owners_db(self)

	def getpath(self, mykey, filename=None):
		rValue = os.path.join(self.root, VDB_PATH, mykey)
		if filename != None:
			rValue = os.path.join(rValue, filename)
		return rValue

	def cpv_exists(self, mykey):
		"Tells us whether an actual ebuild exists on disk (no masking)"
		return os.path.exists(self.getpath(mykey))

	def cpv_counter(self, mycpv):
		"This method will grab the COUNTER. Returns a counter value."
		try:
			return long(self.aux_get(mycpv, ["COUNTER"])[0])
		except (KeyError, ValueError):
			pass
		cdir = self.getpath(mycpv)
		cpath = self.getpath(mycpv, filename="COUNTER")

		# We write our new counter value to a new file that gets moved into
		# place to avoid filesystem corruption on XFS (unexpected reboot.)
		corrupted = 0
		if os.path.exists(cpath):
			cfile = open(cpath, "r")
			try:
				counter = long(cfile.readline())
			except ValueError:
				print "portage: COUNTER for", mycpv, "was corrupted; resetting to value of 0"
				counter = long(0)
				corrupted = 1
			cfile.close()
		elif os.path.exists(cdir):
			mys = pkgsplit(mycpv)
			myl = self.match(mys[0], use_cache=0)
			print mys, myl
			if len(myl) == 1:
				try:
					# Only one package... Counter doesn't matter.
					write_atomic(cpath, "1")
					counter = 1
				except SystemExit, e:
					raise
				except Exception, e:
					writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
						noiselevel=-1)
					writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
						noiselevel=-1)
					writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
					writemsg("!!! %s\n" % e, noiselevel=-1)
					sys.exit(1)
			else:
				writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
					noiselevel=-1)
				writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
					noiselevel=-1)
				writemsg("!!! remerge the package.\n", noiselevel=-1)
				sys.exit(1)
		else:
			counter = long(0)
		if corrupted:
			# update new global counter file
			write_atomic(cpath, str(counter))
		return counter

	def cpv_inject(self, mycpv):
		"injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
		os.makedirs(self.getpath(mycpv))
		counter = self.counter_tick(self.root, mycpv=mycpv)
		# write local package counter so that emerge clean does the right thing
		write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))

	def isInjected(self, mycpv):
		if self.cpv_exists(mycpv):
			if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
				return True
			if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
				return True
		return False

	def move_ent(self, mylist):
		origcp = mylist[1]
		newcp = mylist[2]

		# sanity check
		for cp in [origcp, newcp]:
			if not (isvalidatom(cp) and isjustname(cp)):
				raise InvalidPackageName(cp)
		origmatches = self.match(origcp, use_cache=0)
		moves = 0
		if not origmatches:
			return moves
		for mycpv in origmatches:
			mycpsplit = catpkgsplit(mycpv)
			mynewcpv = newcp + "-" + mycpsplit[2]
			mynewcat = newcp.split("/")[0]
			if mycpsplit[3] != "r0":
				mynewcpv += "-" + mycpsplit[3]
			mycpsplit_new = catpkgsplit(mynewcpv)
			origpath = self.getpath(mycpv)
			if not os.path.exists(origpath):
				continue
			moves += 1
			if not os.path.exists(self.getpath(mynewcat)):
				#create the directory
				os.makedirs(self.getpath(mynewcat))
			newpath = self.getpath(mynewcpv)
			if os.path.exists(newpath):
				#dest already exists; keep this puppy where it is.
				continue
			_movefile(origpath, newpath, mysettings=self.settings)

			# We need to rename the ebuild now.
			old_pf = catsplit(mycpv)[1]
			new_pf = catsplit(mynewcpv)[1]
			if new_pf != old_pf:
				try:
					os.rename(os.path.join(newpath, old_pf + ".ebuild"),
						os.path.join(newpath, new_pf + ".ebuild"))
				except EnvironmentError, e:
					if e.errno != errno.ENOENT:
						raise
					del e
			write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
			write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
			fixdbentries([mylist], newpath)
		return moves

	def cp_list(self, mycp, use_cache=1):
		mysplit = catsplit(mycp)
		if mysplit[0] == '*':
			mysplit[0] = mysplit[0][1:]
		try:
			mystat = os.stat(self.getpath(mysplit[0]))[stat.ST_MTIME]
		except OSError:
			mystat = 0
		if use_cache and mycp in self.cpcache:
			cpc = self.cpcache[mycp]
			if cpc[0] == mystat:
				return cpc[1][:]
		cat_dir = self.getpath(mysplit[0])
		try:
			dir_list = os.listdir(cat_dir)
		except EnvironmentError, e:
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(cat_dir)
			del e
			dir_list = []

		returnme = []
		for x in dir_list:
			if self._excluded_dirs.match(x) is not None:
				continue
			ps = pkgsplit(x)
			if not ps:
				self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
				continue
			if len(mysplit) > 1:
				if ps[0] == mysplit[1]:
					returnme.append(mysplit[0]+"/"+x)
		self._cpv_sort_ascending(returnme)
		if use_cache:
			self.cpcache[mycp] = [mystat, returnme[:]]
		elif mycp in self.cpcache:
			del self.cpcache[mycp]
		return returnme

	def cpv_all(self, use_cache=1):
		"""
		Set use_cache=0 to bypass the portage.cachedir() cache in cases
		when the accuracy of mtime staleness checks should not be trusted
		(generally this is only necessary in critical sections that
		involve merge or unmerge of packages).
		"""
		returnme = []
		basepath = os.path.join(self.root, VDB_PATH) + os.path.sep

		if use_cache:
			from portage import listdir
		else:
			def listdir(p, **kwargs):
				try:
					return [x for x in os.listdir(p) \
						if os.path.isdir(os.path.join(p, x))]
				except EnvironmentError, e:
					if e.errno == PermissionDenied.errno:
						raise PermissionDenied(p)
					del e
					return []

		for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
			if self._excluded_dirs.match(x) is not None:
				continue
			if not self._category_re.match(x):
				continue
			for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
				if self._excluded_dirs.match(y) is not None:
					continue
				subpath = x + "/" + y
				# -MERGING- should never be a cpv, nor should files.
				try:
					if catpkgsplit(subpath) is None:
						self.invalidentry(os.path.join(self.root, subpath))
						continue
				except InvalidData:
					self.invalidentry(os.path.join(self.root, subpath))
					continue
				returnme.append(subpath)
		return returnme

	def cp_all(self, use_cache=1):
		mylist = self.cpv_all(use_cache=use_cache)
		d = {}
		for y in mylist:
			if y[0] == '*':
				y = y[1:]
			try:
				mysplit = catpkgsplit(y)
			except InvalidData:
				self.invalidentry(self.getpath(y))
				continue
			if not mysplit:
				self.invalidentry(self.getpath(y))
				continue
			d[mysplit[0]+"/"+mysplit[1]] = None
		return d.keys()

	def checkblockers(self, origdep):
		pass

	def _clear_cache(self):
		self.mtdircache.clear()
		self.matchcache.clear()
		self.cpcache.clear()
		self._aux_cache_obj = None

	def _add(self, pkg_dblink):
		self._clear_pkg_cache(pkg_dblink)

	def _remove(self, pkg_dblink):
		self._clear_pkg_cache(pkg_dblink)

	def _clear_pkg_cache(self, pkg_dblink):
		# Due to 1 second mtime granularity in <python-2.5, mtime checks
		# are not always sufficient to invalidate vardbapi caches. Therefore,
		# the caches need to be actively invalidated here.
		self.mtdircache.pop(pkg_dblink.cat, None)
		self.matchcache.pop(pkg_dblink.cat, None)
		self.cpcache.pop(pkg_dblink.mysplit[0], None)
		from portage import dircache
		dircache.pop(pkg_dblink.dbcatdir, None)

	def match(self, origdep, use_cache=1):
		"caching match function"
		mydep = dep_expand(
			origdep, mydb=self, use_cache=use_cache, settings=self.settings)
		mykey = dep_getkey(mydep)
		mycat = catsplit(mykey)[0]
		if not use_cache:
			if mycat in self.matchcache:
				del self.mtdircache[mycat]
				del self.matchcache[mycat]
			return list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
		try:
			curmtime = os.stat(self.root+VDB_PATH+"/"+mycat).st_mtime
		except (IOError, OSError):
			curmtime = 0

		if mycat not in self.matchcache or \
			self.mtdircache[mycat] != curmtime:
			# clear cache entry
			self.mtdircache[mycat] = curmtime
			self.matchcache[mycat] = {}
		if mydep not in self.matchcache[mycat]:
			mymatch = list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
			self.matchcache[mycat][mydep] = mymatch
		return self.matchcache[mycat][mydep][:]

	def findname(self, mycpv):
		return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")

	def flush_cache(self):
		"""If the current user has permission and the internal aux_get cache has
		been updated, save it to disk and mark it unmodified.  This is called
		by emerge after it has loaded the full vdb for use in dependency
		calculations.  Currently, the cache is only written if the user has
		superuser privileges (since that's required to obtain a lock), but all
		users have read access and benefit from faster metadata lookups (as
		long as at least part of the cache is still valid)."""
		if self._aux_cache is not None and \
			len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
			secpass >= 2:
			self._owners.populate() # index any unindexed contents
			valid_nodes = set(self.cpv_all())
			for cpv in self._aux_cache["packages"].keys():
				if cpv not in valid_nodes:
					del self._aux_cache["packages"][cpv]
			del self._aux_cache["modified"]
			try:
				f = atomic_ofstream(self._aux_cache_filename)
				cPickle.dump(self._aux_cache, f, -1)
				f.close()
				apply_secpass_permissions(
					self._aux_cache_filename, gid=portage_gid, mode=0644)
			except (IOError, OSError), e:
				pass
			self._aux_cache["modified"] = set()

	@property
	def _aux_cache(self):
		if self._aux_cache_obj is None:
			self._aux_cache_init()
		return self._aux_cache_obj

	def _aux_cache_init(self):
		aux_cache = None
		try:
			f = open(self._aux_cache_filename)
			mypickle = cPickle.Unpickler(f)
			mypickle.find_global = None
			aux_cache = mypickle.load()
			f.close()
			del f
		except (IOError, OSError, EOFError, cPickle.UnpicklingError), e:
			if isinstance(e, cPickle.UnpicklingError):
				writemsg("!!! Error loading '%s': %s\n" % \
					(self._aux_cache_filename, str(e)), noiselevel=-1)
			del e

		if not aux_cache or \
			not isinstance(aux_cache, dict) or \
			aux_cache.get("version") != self._aux_cache_version or \
			not aux_cache.get("packages"):
			aux_cache = {"version": self._aux_cache_version}
			aux_cache["packages"] = {}

		owners = aux_cache.get("owners")
		if owners is not None:
			if not isinstance(owners, dict):
				owners = None
			elif "version" not in owners:
				owners = None
			elif owners["version"] != self._owners_cache_version:
				owners = None
			elif "base_names" not in owners:
				owners = None
			elif not isinstance(owners["base_names"], dict):
				owners = None

		if owners is None:
			owners = {
				"base_names" : {},
				"version" : self._owners_cache_version
			}
			aux_cache["owners"] = owners

		aux_cache["modified"] = set()
		self._aux_cache_obj = aux_cache

	def aux_get(self, mycpv, wants):
		"""This automatically caches selected keys that are frequently needed
		by emerge for dependency calculations.  The cached metadata is
		considered valid if the mtime of the package directory has not changed
		since the data was cached.  The cache is stored in a pickled dict
		object with the following format:

		{version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}

		If an error occurs while loading the cache pickle or the version is
		unrecognized, the cache will simply be recreated from scratch (it is
		completely disposable).
		"""
		cache_these_wants = self._aux_cache_keys.intersection(wants)
		for x in wants:
			if self._aux_cache_keys_re.match(x) is not None:
				cache_these_wants.add(x)

		if not cache_these_wants:
			return self._aux_get(mycpv, wants)

		cache_these = set(self._aux_cache_keys)
		cache_these.update(cache_these_wants)

		mydir = self.getpath(mycpv)
		mydir_stat = None
		try:
			mydir_stat = os.stat(mydir)
		except OSError, e:
			if e.errno != errno.ENOENT:
				raise
			raise KeyError(mycpv)
		mydir_mtime = long(mydir_stat.st_mtime)
		pkg_data = self._aux_cache["packages"].get(mycpv)
		pull_me = cache_these.union(wants)
		mydata = {"_mtime_" : mydir_mtime}
		cache_valid = False
		cache_incomplete = False
		cache_mtime = None
		metadata = None
		if pkg_data is not None:
			if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
				pkg_data = None
			else:
				cache_mtime, metadata = pkg_data
				if not isinstance(cache_mtime, (long, int)) or \
					not isinstance(metadata, dict):
					pkg_data = None

		if pkg_data:
			cache_mtime, metadata = pkg_data
			cache_valid = cache_mtime == mydir_mtime
		if cache_valid:
			mydata.update(metadata)
			pull_me.difference_update(mydata)

		if pull_me:
			# pull any needed data and cache it
			aux_keys = list(pull_me)
			for k, v in izip(aux_keys,
				self._aux_get(mycpv, aux_keys, st=mydir_stat)):
				mydata[k] = v
			if not cache_valid or cache_these.difference(metadata):
				cache_data = {}
				if cache_valid and metadata:
					cache_data.update(metadata)
				for aux_key in cache_these:
					cache_data[aux_key] = mydata[aux_key]
				self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
				self._aux_cache["modified"].add(mycpv)
		return [mydata[x] for x in wants]

	def _aux_get(self, mycpv, wants, st=None):
		mydir = self.getpath(mycpv)
		if st is None:
			try:
				st = os.stat(mydir)
			except OSError, e:
				if e.errno == errno.ENOENT:
					raise KeyError(mycpv)
				elif e.errno == PermissionDenied.errno:
					raise PermissionDenied(mydir)
				else:
					raise
		if not stat.S_ISDIR(st.st_mode):
			raise KeyError(mycpv)
		results = []
		for x in wants:
			if x == "_mtime_":
				results.append(st.st_mtime)
				continue
			try:
				myf = open(os.path.join(mydir, x), "r")
				try:
					myd = myf.read()
				finally:
					myf.close()
				# Preserve \n for metadata that is known to
				# contain multiple lines.
				if self._aux_multi_line_re.match(x) is None:
					myd = " ".join(myd.split())
			except IOError:
				myd = ""
			if x == "EAPI" and not myd:
				results.append("0")
			else:
				results.append(myd)
		return results

	def aux_update(self, cpv, values):
		cat, pkg = catsplit(cpv)
		mylink = dblink(cat, pkg, self.root, self.settings,
			treetype="vartree", vartree=self.vartree)
		if not mylink.exists():
			raise KeyError(cpv)
		for k, v in values.iteritems():
			if v:
				mylink.setfile(k, v)
			else:
				try:
					os.unlink(os.path.join(self.getpath(cpv), k))
				except EnvironmentError:
					pass

	def counter_tick(self, myroot, mycpv=None):
		return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)

	def get_counter_tick_core(self, myroot, mycpv=None):
		"""
		Use this method to retrieve the counter instead
		of having to trust the value of a global counter
		file that can lead to invalid COUNTER
		generation. When cache is valid, the package COUNTER
		files are not read and we rely on the timestamp of
		the package directory to validate cache. The stat
		calls should only take a short time, so performance
		is sufficient without having to rely on a potentially
		corrupt global counter file.

		The global counter file located at
		$CACHE_PATH/counter serves to record the
		counter of the last installed package and
		it also corresponds to the total number of
		installation actions that have occurred in
		the history of this package database.
		"""
		cp_list = self.cp_list
		max_counter = 0
		for cp in self.cp_all():
			for cpv in cp_list(cp):
				try:
					counter = int(self.aux_get(cpv, ["COUNTER"])[0])
				except (KeyError, OverflowError, ValueError):
					continue
				if counter > max_counter:
					max_counter = counter

		new_vdb = False
		counter = -1
		try:
			cfile = open(self._counter_path, "r")
		except EnvironmentError, e:
			new_vdb = not bool(self.cpv_all())
			if not new_vdb:
				writemsg("!!! Unable to read COUNTER file: '%s'\n" % \
					self._counter_path, noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
			del e
		else:
			try:
				try:
					counter = long(cfile.readline().strip())
				finally:
					cfile.close()
			except (OverflowError, ValueError), e:
				writemsg("!!! COUNTER file is corrupt: '%s'\n" % \
					self._counter_path, noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				del e

		# We must ensure that we return a counter
		# value that is at least as large as the
		# highest one from the installed packages,
		# since having a corrupt value that is too low
		# can trigger incorrect AUTOCLEAN behavior due
		# to newly installed packages having lower
		# COUNTERs than the previous version in the
		# same slot.
		if counter > max_counter:
			max_counter = counter

		if counter < 0 and not new_vdb:
			writemsg("!!! Initializing COUNTER to " + \
				"value of %d\n" % max_counter, noiselevel=-1)

		return max_counter + 1

	def counter_tick_core(self, myroot, incrementing=1, mycpv=None):
		"This method will grab the next COUNTER value and record it back to the global file.  Returns new counter value."
		counter = self.get_counter_tick_core(myroot, mycpv=mycpv) - 1
		if incrementing:
			#increment counter
			counter += 1
			# update new global counter file
			write_atomic(self._counter_path, str(counter))
		return counter

	def _dblink(self, cpv):
		category, pf = catsplit(cpv)
		return dblink(category, pf, self.root,
			self.settings, vartree=self.vartree, treetype="vartree")

	def removeFromContents(self, pkg, paths, relative_paths=True):
		"""
		@param pkg: cpv for an installed package
		@type pkg: string
		@param paths: paths of files to remove from contents
		@type paths: iterable
		"""
		if not hasattr(pkg, "getcontents"):
			pkg = self._dblink(pkg)
		root = self.root
		root_len = len(root) - 1
		new_contents = pkg.getcontents().copy()
		removed = 0

		for filename in paths:
			filename = normalize_path(filename)
			if relative_paths:
				relative_filename = filename
			else:
				relative_filename = filename[root_len:]
			contents_key = pkg._match_contents(relative_filename, root)
			if contents_key:
				del new_contents[contents_key]
				removed += 1

		if removed:
			f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
			write_contents(new_contents, root, f)
			f.close()

	class _owners_cache(object):
		"""
		This class maintains a hash table that serves to index package
		contents by mapping the basename of a file to a list of possible
		packages that own it.  This is used to optimize owner lookups
		by narrowing the search down to a smaller number of packages.
		"""
		try:
			from hashlib import md5 as _new_hash
		except ImportError:
			from md5 import new as _new_hash

		_hash_bits = 16
		_hex_chars = _hash_bits / 4

		def __init__(self, vardb):
			self._vardb = vardb

		def add(self, cpv):
			root_len = len(self._vardb.root)
			contents = self._vardb._dblink(cpv).getcontents()
			pkg_hash = self._hash_pkg(cpv)
			if not contents:
				# Empty path is a code used to represent empty contents.
				self._add_path("", pkg_hash)
			for x in contents:
				self._add_path(x[root_len:], pkg_hash)
			self._vardb._aux_cache["modified"].add(cpv)

		def _add_path(self, path, pkg_hash):
			"""
			Empty path is a code that represents empty contents.
			"""
			if path:
				name = os.path.basename(path.rstrip(os.path.sep))
				if not name:
					return
			else:
				name = path
			name_hash = self._hash_str(name)
			base_names = self._vardb._aux_cache["owners"]["base_names"]
			pkgs = base_names.get(name_hash)
			if pkgs is None:
				pkgs = {}
				base_names[name_hash] = pkgs
			pkgs[pkg_hash] = None

		def _hash_str(self, s):
			h = self._new_hash()
			h.update(s)
			h = h.hexdigest()
			h = h[-self._hex_chars:]
			h = int(h, 16)
			return h
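
		# Example: with _hash_bits = 16, _hash_str('libfoo.so.1') keeps the
		# last 4 hex digits of the md5 digest, giving an int in 0..65535 that
		# serves as the base_names bucket key.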

		def _hash_pkg(self, cpv):
			counter, mtime = self._vardb.aux_get(
				cpv, ["COUNTER", "_mtime_"])
			try:
				counter = int(counter)
			except ValueError:
				counter = 0
			return (cpv, counter, mtime)
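
		# Example package hash (invented values):
		#   ('dev-libs/foo-1.2', 42, 1218360000L)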

	class _owners_db(object):

		def __init__(self, vardb):
			self._vardb = vardb

		def populate(self):
			self._populate()

		def _populate(self):
			owners_cache = vardbapi._owners_cache(self._vardb)
			cached_hashes = set()
			base_names = self._vardb._aux_cache["owners"]["base_names"]

			# Take inventory of all cached package hashes.
			for name, hash_values in base_names.items():
				if not isinstance(hash_values, dict):
					del base_names[name]
					continue
				cached_hashes.update(hash_values)

			# Create sets of valid package hashes and uncached packages.
			uncached_pkgs = set()
			hash_pkg = owners_cache._hash_pkg
			valid_pkg_hashes = set()
			for cpv in self._vardb.cpv_all():
				hash_value = hash_pkg(cpv)
				valid_pkg_hashes.add(hash_value)
				if hash_value not in cached_hashes:
					uncached_pkgs.add(cpv)

			# Cache any missing packages.
			for cpv in uncached_pkgs:
				owners_cache.add(cpv)

			# Delete any stale cache.
			stale_hashes = cached_hashes.difference(valid_pkg_hashes)
			if stale_hashes:
				for base_name_hash, bucket in base_names.items():
					for hash_value in stale_hashes.intersection(bucket):
						del bucket[hash_value]
					if not bucket:
						del base_names[base_name_hash]

			return owners_cache

		def get_owners(self, path_iter):
			"""
			@return the owners as a dblink -> set(files) mapping.
			"""
			owners = {}
			for owner, f in self.iter_owners(path_iter):
				owned_files = owners.get(owner)
				if owned_files is None:
					owned_files = set()
					owners[owner] = owned_files
				owned_files.add(f)
			return owners

		def getFileOwnerMap(self, path_iter):
			owners = self.get_owners(path_iter)
			file_owners = {}
			for pkg_dblink, files in owners.iteritems():
				for f in files:
					owner_set = file_owners.get(f)
					if owner_set is None:
						owner_set = set()
						file_owners[f] = owner_set
					owner_set.add(pkg_dblink)
			return file_owners

		def iter_owners(self, path_iter):
			"""
			Iterate over tuples of (dblink, path).  In order to avoid
			consuming too many resources for too much time, resources
			are only allocated for the duration of a given iter_owners()
			call.  Therefore, to maximize reuse of resources when searching
			for multiple files, it's best to search for them all in a single
			call.
			"""

			owners_cache = self._populate()

			vardb = self._vardb
			root = vardb.root
			hash_pkg = owners_cache._hash_pkg
			hash_str = owners_cache._hash_str
			base_names = self._vardb._aux_cache["owners"]["base_names"]

			dblink_cache = {}

			def dblink(cpv):
				x = dblink_cache.get(cpv)
				if x is None:
					x = self._vardb._dblink(cpv)
					dblink_cache[cpv] = x
				return x

			for path in path_iter:
				name = os.path.basename(path.rstrip(os.path.sep))
				if not name:
					continue

				name_hash = hash_str(name)
				pkgs = base_names.get(name_hash)
				if pkgs is not None:
					for hash_value in pkgs:
						if not isinstance(hash_value, tuple) or \
							len(hash_value) != 3:
							continue
						cpv, counter, mtime = hash_value
						if not isinstance(cpv, basestring):
							continue
						try:
							current_hash = hash_pkg(cpv)
						except KeyError:
							continue

						if current_hash != hash_value:
							continue
						if dblink(cpv).isowner(path, root):
							yield dblink(cpv), path

class vartree(object):
	"this tree will scan a var/db/pkg database located at root (passed to init)"
	def __init__(self, root="/", virtual=None, clone=None, categories=None,
		settings=None):
		if clone:
			writemsg("vartree.__init__(): deprecated " + \
				"use of clone parameter\n", noiselevel=-1)
			self.root = clone.root[:]
			self.dbapi = copy.deepcopy(clone.dbapi)
			self.populated = 1
			from portage import config
			self.settings = config(clone=clone.settings)
		else:
			self.root = root[:]
			if settings is None:
				from portage import settings
			self.settings = settings # for key_expand calls
			if categories is None:
				categories = settings.categories
			self.dbapi = vardbapi(self.root, categories=categories,
				settings=settings, vartree=self)
			self.populated = 1

	def getpath(self, mykey, filename=None):
		return self.dbapi.getpath(mykey, filename=filename)

	def zap(self, mycpv):
		return

	def inject(self, mycpv):
		return

	def get_provide(self, mycpv):
		myprovides = []
		mylines = None
		try:
			mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
			if mylines:
				myuse = myuse.split()
				mylines = flatten(use_reduce(paren_reduce(mylines), uselist=myuse))
				for myprovide in mylines:
					mys = catpkgsplit(myprovide)
					if not mys:
						mys = myprovide.split("/")
					myprovides += [mys[0] + "/" + mys[1]]
			return myprovides
		except SystemExit, e:
			raise
		except Exception, e:
			mydir = os.path.join(self.root, VDB_PATH, mycpv)
			writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
				noiselevel=-1)
			if mylines:
				writemsg("Possibly Invalid: '%s'\n" % str(mylines),
					noiselevel=-1)
			writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
			return []

	def get_all_provides(self):
		myprovides = {}
		for node in self.getallcpv():
			for mykey in self.get_provide(node):
				if mykey in myprovides:
					myprovides[mykey] += [node]
				else:
					myprovides[mykey] = [node]
		return myprovides

	def dep_bestmatch(self, mydep, use_cache=1):
		"compatibility method -- all matches, not just visible ones"
		#mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
		mymatch = best(self.dbapi.match(
			dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
			use_cache=use_cache))
		if mymatch is None:
			return ""
		else:
			return mymatch

	def dep_match(self, mydep, use_cache=1):
		"compatibility method -- we want to see all matches, not just visible ones"
		#mymatch = match(mydep,self.dbapi)
		mymatch = self.dbapi.match(mydep, use_cache=use_cache)
		if mymatch is None:
			return []
		else:
			return mymatch

	def exists_specific(self, cpv):
		return self.dbapi.cpv_exists(cpv)

	def getallcpv(self):
		"""temporary function, probably to be renamed --- Gets a list of all
		category/package-versions installed on the system."""
		return self.dbapi.cpv_all()

	def getallnodes(self):
		"""new behavior: these are all *unmasked* nodes. There may or may not be available
		masked package for nodes in this nodes list."""
		return self.dbapi.cp_all()

	def exists_specific_cat(self, cpv, use_cache=1):
		cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		a = catpkgsplit(cpv)
		if not a:
			return 0
		mylist = listdir(self.getpath(a[0]), EmptyOnError=1)
		for x in mylist:
			b = pkgsplit(x)
			if not b:
				self.dbapi.invalidentry(self.getpath(a[0], filename=x))
				continue
			if a[1] == b[0]:
				return 1
		return 0

	def getebuildpath(self, fullpackage):
		cat, package = catsplit(fullpackage)
		return self.getpath(fullpackage, filename=package+".ebuild")

	def getnode(self, mykey, use_cache=1):
		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		if not mykey:
			return []
		mysplit = catsplit(mykey)
		mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
		returnme = []
		for x in mydirlist:
			mypsplit = pkgsplit(x)
			if not mypsplit:
				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
				continue
			if mypsplit[0] == mysplit[1]:
				appendme = [mysplit[0]+"/"+x, [mysplit[0], mypsplit[0], mypsplit[1], mypsplit[2]]]
				returnme.append(appendme)
		return returnme

	def getslot(self, mycatpkg):
		"Get a slot for a catpkg; assume it exists."
		try:
			return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
		except KeyError:
			return ""

	def hasnode(self, mykey, use_cache):
		"""Does the particular node (cat/pkg key) exist?"""
		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		mysplit = catsplit(mykey)
		mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
		for x in mydirlist:
			mypsplit = pkgsplit(x)
			if not mypsplit:
				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
				continue
			if mypsplit[0] == mysplit[1]:
				return 1
		return 0

	def populate(self):
		self.populated = 1

class dblink(object):
	"""
	This class provides an interface to the installed package database
	At present this is implemented as a text backend in /var/db/pkg.
	"""

	import re
	_normalize_needed = re.compile(r'.*//.*|^[^/]|.+/$|(^|.*/)\.\.?(/.*|$)')
	_contents_split_counts = {
		"dev": 2,
		"dir": 2,
		"fif": 2,
		"obj": 4,
		"sym": 5
	}
1583 # scheduler each time this many files are processed.
1584 _file_merge_yield_interval = 20
1586 def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
1587 vartree=None, blockers=None, scheduler=None):
1589 Creates a DBlink object for a given CPV.
1590 The given CPV may not be present in the database already.
1592 @param cat: Category
1593 @type cat: String
1594 @param pkg: Package (PV)
1595 @type pkg: String
1596 @param myroot: Typically ${ROOT}
1597 @type myroot: String (Path)
1598 @param mysettings: Typically portage.config
1599 @type mysettings: An instance of portage.config
1600 @param treetype: one of ['porttree','bintree','vartree']
1601 @type treetype: String
1602 @param vartree: an instance of vartree corresponding to myroot.
1603 @type vartree: vartree
1606 self.cat = cat
1607 self.pkg = pkg
1608 self.mycpv = self.cat + "/" + self.pkg
1609 self.mysplit = list(catpkgsplit(self.mycpv)[1:])
1610 self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
1611 self.treetype = treetype
1612 if vartree is None:
1613 from portage import db
1614 vartree = db[myroot]["vartree"]
1615 self.vartree = vartree
1616 self._blockers = blockers
1617 self._scheduler = scheduler
1619 self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
1620 self.dbcatdir = self.dbroot+"/"+cat
1621 self.dbpkgdir = self.dbcatdir+"/"+pkg
1622 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
1623 self.dbdir = self.dbpkgdir
1625 self._lock_vdb = None
1627 self.settings = mysettings
1628 if self.settings == 1:
1629 raise ValueError
1631 self.myroot=myroot
1632 protect_obj = ConfigProtect(myroot,
1633 mysettings.get("CONFIG_PROTECT","").split(),
1634 mysettings.get("CONFIG_PROTECT_MASK","").split())
1635 self.updateprotect = protect_obj.updateprotect
1636 self.isprotected = protect_obj.isprotected
1637 self._installed_instance = None
1638 self.contentscache = None
1639 self._contents_inodes = None
1640 self._contents_basenames = None

	def lockdb(self):
		if self._lock_vdb:
			raise AssertionError("Lock already held.")
		# At least the parent needs to exist for the lock file.
		ensure_dirs(self.dbroot)
		self._lock_vdb = lockdir(self.dbroot)

	def unlockdb(self):
		if self._lock_vdb:
			unlockdir(self._lock_vdb)
			self._lock_vdb = None

	def getpath(self):
		"return path to location of db information (for >>> informational display)"
		return self.dbdir

	def exists(self):
		"does the db entry exist?  boolean."
		return os.path.exists(self.dbdir)

	def delete(self):
		"""
		Remove this entry from the database
		"""
		if not os.path.exists(self.dbdir):
			return

		# Check validity of self.dbdir before attempting to remove it.
		if not self.dbdir.startswith(self.dbroot):
			writemsg("portage.dblink.delete(): invalid dbdir: %s\n" % \
				self.dbdir, noiselevel=-1)
			return
		import shutil
		shutil.rmtree(self.dbdir)
		self.vartree.dbapi._remove(self)

	def clearcontents(self):
		"""
		For a given db entry (self), erase the CONTENTS values.
		"""
		if os.path.exists(self.dbdir+"/CONTENTS"):
			os.unlink(self.dbdir+"/CONTENTS")

	def _clear_contents_cache(self):
		self.contentscache = None
		self._contents_inodes = None
		self._contents_basenames = None
1690 def getcontents(self):
1692 Get the installed files of a given package (aka what that package installed)
1694 contents_file = os.path.join(self.dbdir, "CONTENTS")
1695 if self.contentscache is not None:
1696 return self.contentscache
1697 pkgfiles = {}
1698 try:
1699 myc = open(contents_file,"r")
1700 except EnvironmentError, e:
1701 if e.errno != errno.ENOENT:
1702 raise
1703 del e
1704 self.contentscache = pkgfiles
1705 return pkgfiles
1706 mylines = myc.readlines()
1707 myc.close()
1708 null_byte = "\0"
1709 normalize_needed = self._normalize_needed
1710 contents_split_counts = self._contents_split_counts
1711 myroot = self.myroot
1712 if myroot == os.path.sep:
1713 myroot = None
1714 pos = 0
1715 errors = []
1716 for pos, line in enumerate(mylines):
1717 if null_byte in line:
1718 # Null bytes are a common indication of corruption.
1719 errors.append((pos + 1, "Null byte found in CONTENTS entry"))
1720 continue
1721 line = line.rstrip("\n")
1722 # Split on " " so that even file paths that
1723 # end with spaces can be handled.
1724 mydat = line.split(" ")
1725 entry_type = mydat[0] # empty string if line is empty
1726 correct_split_count = contents_split_counts.get(entry_type)
1727 if correct_split_count and len(mydat) > correct_split_count:
1728 # Apparently file paths contain spaces, so reassemble
1729 # the split have the correct_split_count.
1730 newsplit = [entry_type]
1731 spaces_total = len(mydat) - correct_split_count
1732 if entry_type == "sym":
1733 try:
1734 splitter = mydat.index("->", 2, len(mydat) - 2)
1735 except ValueError:
1736 errors.append((pos + 1, "Unrecognized CONTENTS entry"))
1737 continue
1738 spaces_in_path = splitter - 2
1739 spaces_in_target = spaces_total - spaces_in_path
1740 newsplit.append(" ".join(mydat[1:splitter]))
1741 newsplit.append("->")
1742 target_end = splitter + spaces_in_target + 2
1743 newsplit.append(" ".join(mydat[splitter + 1:target_end]))
1744 newsplit.extend(mydat[target_end:])
1745 else:
1746 path_end = spaces_total + 2
1747 newsplit.append(" ".join(mydat[1:path_end]))
1748 newsplit.extend(mydat[path_end:])
1749 mydat = newsplit
1751 # we do this so we can remove from non-root filesystems
1752 # (use the ROOT var to allow maintenance on other partitions)
1753 try:
1754 if normalize_needed.match(mydat[1]):
1755 mydat[1] = normalize_path(mydat[1])
1756 if not mydat[1].startswith(os.path.sep):
1757 mydat[1] = os.path.sep + mydat[1]
1758 if myroot:
1759 mydat[1] = os.path.join(myroot, mydat[1].lstrip(os.path.sep))
1760 if mydat[0] == "obj":
1761 #format: type, mtime, md5sum
1762 pkgfiles[mydat[1]] = [mydat[0], mydat[3], mydat[2]]
1763 elif mydat[0] == "dir":
1764 #format: type
1765 pkgfiles[mydat[1]] = [mydat[0]]
1766 elif mydat[0] == "sym":
1767 #format: type, mtime, dest
1768 pkgfiles[mydat[1]] = [mydat[0], mydat[4], mydat[3]]
1769 elif mydat[0] == "dev":
1770 #format: type
1771 pkgfiles[mydat[1]] = [mydat[0]]
1772 elif mydat[0]=="fif":
1773 #format: type
1774 pkgfiles[mydat[1]] = [mydat[0]]
1775 else:
1776 errors.append((pos + 1, "Unrecognized CONTENTS entry"))
1777 except (KeyError, IndexError):
1778 errors.append((pos + 1, "Unrecognized CONTENTS entry"))
1779 if errors:
1780 writemsg("!!! Parse error in '%s'\n" % contents_file, noiselevel=-1)
1781 for pos, e in errors:
1782 writemsg("!!! line %d: %s\n" % (pos, e), noiselevel=-1)
1783 self.contentscache = pkgfiles
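# Illustrative shape of the returned mapping (with ROOT="/"; values
# follow the per-type formats noted above):
#   {"/usr/bin/foo":       ["obj", "1190085057", "d41d8cd9..."],
#    "/usr/lib/libfoo.so": ["sym", "1190085057", "libfoo.so.1"],
#    "/usr/share/foo":     ["dir"]}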
1784 return pkgfiles
1786 def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
1787 ldpath_mtimes=None, others_in_slot=None):
1788 """
1789 Calls prerm
1790 Unmerges a given package (CPV)
1791 calls postrm
1792 calls cleanrm
1793 calls env_update
1795 @param pkgfiles: files to unmerge (generally self.getcontents() )
1796 @type pkgfiles: Dictionary
1797 @param trimworld: Remove CPV from world file if True, not if False
1798 @type trimworld: Boolean
1799 @param cleanup: cleanup to pass to doebuild (see doebuild)
1800 @type cleanup: Boolean
1801 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
1802 @type ldpath_mtimes: Dictionary
1803 @param others_in_slot: all dblink instances in this slot, excluding self
1804 @type others_in_slot: list
1805 @rtype: Integer
1806 @returns:
1807 1. os.EX_OK if everything went well.
1808 2. return code of the failed phase (for prerm, postrm, cleanrm)
1810 Notes:
1811 The caller must ensure that lockdb() and unlockdb() are called
1812 before and after this method.
1813 """
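# Typical call pattern (sketch; uses the lockdb()/unlockdb() pairing
# required by the note above):
#   mylink.lockdb()
#   try:
#       retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
#   finally:
#       mylink.unlockdb()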
1814 showMessage = self._display_merge
1815 if self.vartree.dbapi._categories is not None:
1816 self.vartree.dbapi._categories = None
1817 # When others_in_slot is supplied, the security check has already been
1818 # done for this slot, so it shouldn't be repeated until the next
1819 # replacement or unmerge operation.
1820 if others_in_slot is None:
1821 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
1822 slot_matches = self.vartree.dbapi.match(
1823 "%s:%s" % (dep_getkey(self.mycpv), slot))
1824 others_in_slot = []
1825 for cur_cpv in slot_matches:
1826 if cur_cpv == self.mycpv:
1827 continue
1828 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
1829 self.vartree.root, self.settings, vartree=self.vartree,
1830 treetype="vartree"))
1832 retval = self._security_check([self] + others_in_slot)
1833 if retval:
1834 return retval
1836 contents = self.getcontents()
1837 # Now, don't assume that the name of the ebuild is the same as the
1838 # name of the dir; the package may have been moved.
1839 myebuildpath = None
1840 ebuild_phase = "prerm"
1841 log_path = None
1842 mystuff = os.listdir(self.dbdir)
1843 for x in mystuff:
1844 if x.endswith(".ebuild"):
1845 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
1846 if x[:-7] != self.pkg:
1847 # Clean up after vardbapi.move_ent() breakage in
1848 # portage versions before 2.1.2
1849 os.rename(os.path.join(self.dbdir, x), myebuildpath)
1850 write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
1851 break
1853 self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
1854 if myebuildpath:
1855 try:
1856 doebuild_environment(myebuildpath, "prerm", self.myroot,
1857 self.settings, 0, 0, self.vartree.dbapi)
1858 except UnsupportedAPIException, e:
1859 # Sometimes this happens due to corruption of the EAPI file.
1860 writemsg("!!! FAILED prerm: %s\n" % \
1861 os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
1862 writemsg("%s\n" % str(e), noiselevel=-1)
1863 myebuildpath = None
1864 else:
1865 catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
1866 ensure_dirs(os.path.dirname(catdir), uid=portage_uid,
1867 gid=portage_gid, mode=070, mask=0)
1869 builddir_lock = None
1870 catdir_lock = None
1871 scheduler = self._scheduler
1872 retval = -1
1873 try:
1874 if myebuildpath:
1875 catdir_lock = lockdir(catdir)
1876 ensure_dirs(catdir,
1877 uid=portage_uid, gid=portage_gid,
1878 mode=070, mask=0)
1879 builddir_lock = lockdir(
1880 self.settings["PORTAGE_BUILDDIR"])
1881 try:
1882 unlockdir(catdir_lock)
1883 finally:
1884 catdir_lock = None
1886 prepare_build_dirs(self.myroot, self.settings, 1)
1887 log_path = self.settings.get("PORTAGE_LOG_FILE")
1889 if scheduler is None:
1890 retval = doebuild(myebuildpath, ebuild_phase, self.myroot,
1891 self.settings, cleanup=cleanup, use_cache=0,
1892 mydbapi=self.vartree.dbapi, tree=self.treetype,
1893 vartree=self.vartree)
1894 else:
1895 retval = scheduler.dblinkEbuildPhase(
1896 self, self.vartree.dbapi, myebuildpath, ebuild_phase)
1898 # XXX: Decide how to handle failures here.
1899 if retval != os.EX_OK:
1900 writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
1902 self._unmerge_pkgfiles(pkgfiles, others_in_slot)
1904 # Remove the registration of preserved libs for this pkg instance
1905 plib_registry = self.vartree.dbapi.plib_registry
1906 plib_registry.unregister(self.mycpv, self.settings["SLOT"],
1907 self.vartree.dbapi.cpv_counter(self.mycpv))
1909 if myebuildpath:
1910 ebuild_phase = "postrm"
1911 if scheduler is None:
1912 retval = doebuild(myebuildpath, ebuild_phase, self.myroot,
1913 self.settings, use_cache=0, tree=self.treetype,
1914 mydbapi=self.vartree.dbapi, vartree=self.vartree)
1915 else:
1916 retval = scheduler.dblinkEbuildPhase(
1917 self, self.vartree.dbapi, myebuildpath, ebuild_phase)
1919 # XXX: Decide how to handle failures here.
1920 if retval != os.EX_OK:
1921 writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
1923 # regenerate reverse NEEDED map
1924 self.vartree.dbapi.linkmap.rebuild()
1926 # remove preserved libraries that don't have any consumers left
1927 # FIXME: this code is quite ugly and can likely be optimized in several ways
1928 plib_dict = plib_registry.getPreservedLibs()
1929 for cpv in plib_dict:
1930 plib_dict[cpv].sort()
1931 # for the loop below to work correctly, we need all
1932 # symlinks to come before the actual files, such that
1933 # the recorded symlinks (sonames) will be resolved into
1934 # their real target before the object is found not to be
1935 # in the reverse NEEDED map
1936 def symlink_compare(x, y):
1937 if os.path.islink(x):
1938 if os.path.islink(y):
1939 return 0
1940 else:
1941 return -1
1942 elif os.path.islink(y):
1943 return 1
1944 else:
1945 return 0
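# (A Python 2 cmp-style comparator: returning -1 sorts symlinks ahead
# of regular files, so recorded sonames are resolved first.)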
1947 plib_dict[cpv].sort(symlink_compare)
1948 for f in plib_dict[cpv]:
1949 if not os.path.exists(f):
1950 continue
1951 unlink_list = []
1952 consumers = self.vartree.dbapi.linkmap.findConsumers(f)
1953 if not consumers:
1954 unlink_list.append(f)
1955 else:
1956 keep=False
1957 for c in consumers:
1958 if c not in self.getcontents():
1959 keep=True
1960 break
1961 if not keep:
1962 unlink_list.append(f)
1963 for obj in unlink_list:
1964 try:
1965 if os.path.islink(obj):
1966 obj_type = "sym"
1967 else:
1968 obj_type = "obj"
1969 os.unlink(obj)
1970 showMessage("<<< !needed %s %s\n" % (obj_type, obj))
1971 except OSError, e:
1972 if e.errno == errno.ENOENT:
1973 pass
1974 else:
1975 raise e
1976 plib_registry.pruneNonExisting()
1978 finally:
1979 if builddir_lock:
1980 try:
1981 if myebuildpath:
1982 if retval != os.EX_OK:
1983 msg_lines = []
1984 msg = ("The '%s' " % ebuild_phase) + \
1985 ("phase of the '%s' package " % self.mycpv) + \
1986 ("has failed with exit value %s." % retval)
1987 from textwrap import wrap
1988 msg_lines.extend(wrap(msg, 72))
1989 msg_lines.append("")
1991 ebuild_name = os.path.basename(myebuildpath)
1992 ebuild_dir = os.path.dirname(myebuildpath)
1993 msg = "The problem occurred while executing " + \
1994 ("the ebuild file named '%s' " % ebuild_name) + \
1995 ("located in the '%s' directory. " \
1996 % ebuild_dir) + \
1997 "If necessary, manually remove " + \
1998 "the environment.bz2 file and/or the " + \
1999 "ebuild file located in that directory."
2000 msg_lines.extend(wrap(msg, 72))
2001 msg_lines.append("")
2003 msg = "Removal " + \
2004 "of the environment.bz2 file is " + \
2005 "preferred since it may allow the " + \
2006 "removal phases to execute successfully. " + \
2007 "The ebuild will be " + \
2008 "sourced and the eclasses " + \
2009 "from the current portage tree will be used " + \
2010 "when necessary. Removal of " + \
2011 "the ebuild file will cause the " + \
2012 "pkg_prerm() and pkg_postrm() removal " + \
2013 "phases to be skipped entirely."
2014 msg_lines.extend(wrap(msg, 72))
2016 self._eerror(ebuild_phase, msg_lines)
2018 # process logs created during pre/postrm
2019 elog_process(self.mycpv, self.settings, phasefilter=filter_unmergephases)
2020 if retval == os.EX_OK:
2021 doebuild(myebuildpath, "cleanrm", self.myroot,
2022 self.settings, tree="vartree",
2023 mydbapi=self.vartree.dbapi,
2024 vartree=self.vartree)
2025 finally:
2026 unlockdir(builddir_lock)
2027 try:
2028 if myebuildpath and not catdir_lock:
2029 # Lock catdir for removal if empty.
2030 catdir_lock = lockdir(catdir)
2031 finally:
2032 if catdir_lock:
2033 try:
2034 os.rmdir(catdir)
2035 except OSError, e:
2036 if e.errno not in (errno.ENOENT,
2037 errno.ENOTEMPTY, errno.EEXIST):
2038 raise
2039 del e
2040 unlockdir(catdir_lock)
2042 if log_path is not None and os.path.exists(log_path):
2043 # Restore this since it gets lost somewhere above and it
2044 # needs to be set for _display_merge() to be able to log.
2045 # Note that the log isn't necessarily supposed to exist
2046 # since if PORT_LOGDIR is unset then it's a temp file
2047 # so it gets cleaned above.
2048 self.settings["PORTAGE_LOG_FILE"] = log_path
2049 else:
2050 self.settings.pop("PORTAGE_LOG_FILE", None)
2052 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
2053 contents=contents, env=self.settings.environ(),
2054 writemsg_level=self._display_merge)
2055 return os.EX_OK
2057 def _display_merge(self, msg, level=0, noiselevel=0):
2058 if self._scheduler is not None:
2059 self._scheduler.dblinkDisplayMerge(self, msg,
2060 level=level, noiselevel=noiselevel)
2061 return
2062 writemsg_level(msg, level=level, noiselevel=noiselevel)
2064 def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
2065 """
2067 Unmerges the contents of a package from the liveFS
2068 Removes the VDB entry for self
2070 @param pkgfiles: typically self.getcontents()
2071 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
2072 @param others_in_slot: all dblink instances in this slot, excluding self
2073 @type others_in_slot: list
2074 @rtype: None
2075 """
2077 showMessage = self._display_merge
2078 scheduler = self._scheduler
2080 if not pkgfiles:
2081 showMessage("No package files given... Grabbing a set.\n")
2082 pkgfiles = self.getcontents()
2084 if others_in_slot is None:
2085 others_in_slot = []
2086 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
2087 slot_matches = self.vartree.dbapi.match(
2088 "%s:%s" % (dep_getkey(self.mycpv), slot))
2089 for cur_cpv in slot_matches:
2090 if cur_cpv == self.mycpv:
2091 continue
2092 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2093 self.vartree.root, self.settings,
2094 vartree=self.vartree, treetype="vartree"))
2096 dest_root = normalize_path(self.vartree.root).rstrip(os.path.sep) + \
2097 os.path.sep
2098 dest_root_len = len(dest_root) - 1
2100 conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE)
2101 cfgfiledict = grabdict(conf_mem_file)
2102 stale_confmem = []
2104 unmerge_orphans = "unmerge-orphans" in self.settings.features
2106 if pkgfiles:
2107 self.updateprotect()
2108 mykeys = pkgfiles.keys()
2109 mykeys.sort()
2110 mykeys.reverse()
2112 #process symlinks second-to-last, directories last.
2113 mydirs = []
2114 ignored_unlink_errnos = (
2115 errno.EBUSY, errno.ENOENT,
2116 errno.ENOTDIR, errno.EISDIR)
2117 ignored_rmdir_errnos = (
2118 errno.EEXIST, errno.ENOTEMPTY,
2119 errno.EBUSY, errno.ENOENT,
2120 errno.ENOTDIR, errno.EISDIR)
2121 modprotect = os.path.join(self.vartree.root, "lib/modules/")
2123 def unlink(file_name, lstatobj):
2124 if bsd_chflags:
2125 if lstatobj.st_flags != 0:
2126 bsd_chflags.lchflags(file_name, 0)
2127 parent_name = os.path.dirname(file_name)
2128 # Use normal stat/chflags for the parent since we want to
2129 # follow any symlinks to the real parent directory.
2130 pflags = os.stat(parent_name).st_flags
2131 if pflags != 0:
2132 bsd_chflags.chflags(parent_name, 0)
2133 try:
2134 if not stat.S_ISLNK(lstatobj.st_mode):
2135 # Remove permissions to ensure that any hardlinks to
2136 # suid/sgid files are rendered harmless.
2137 os.chmod(file_name, 0)
2138 os.unlink(file_name)
2139 finally:
2140 if bsd_chflags and pflags != 0:
2141 # Restore the parent flags we saved before unlinking
2142 bsd_chflags.chflags(parent_name, pflags)
2144 def show_unmerge(zing, desc, file_type, file_name):
2145 showMessage("%s %s %s %s\n" % \
2146 (zing, desc.ljust(8), file_type, file_name))
2147 for i, objkey in enumerate(mykeys):
2149 if scheduler is not None and \
2150 0 == i % self._file_merge_yield_interval:
2151 scheduler.scheduleYield()
2153 obj = normalize_path(objkey)
2154 file_data = pkgfiles[objkey]
2155 file_type = file_data[0]
2156 statobj = None
2157 try:
2158 statobj = os.stat(obj)
2159 except OSError:
2160 pass
2161 lstatobj = None
2162 try:
2163 lstatobj = os.lstat(obj)
2164 except (OSError, AttributeError):
2165 pass
2166 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
2167 if lstatobj is None:
2168 show_unmerge("---", "!found", file_type, obj)
2169 continue
2170 if obj.startswith(dest_root):
2171 relative_path = obj[dest_root_len:]
2172 is_owned = False
2173 for dblnk in others_in_slot:
2174 if dblnk.isowner(relative_path, dest_root):
2175 is_owned = True
2176 break
2177 if is_owned:
2178 # A new instance of this package claims the file, so
2179 # don't unmerge it.
2180 show_unmerge("---", "replaced", file_type, obj)
2181 continue
2182 elif relative_path in cfgfiledict:
2183 stale_confmem.append(relative_path)
2184 # next line includes a tweak to protect modules from being unmerged,
2185 # but we don't protect modules from being overwritten if they are
2186 # upgraded. We effectively only want one half of the config protection
2187 # functionality for /lib/modules. For portage-ng both capabilities
2188 # should be able to be independently specified.
2189 if obj.startswith(modprotect):
2190 show_unmerge("---", "cfgpro", file_type, obj)
2191 continue
2193 # Don't unlink symlinks to directories here since that can
2194 # remove /lib and /usr/lib symlinks.
2195 if unmerge_orphans and \
2196 lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
2197 not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
2198 not self.isprotected(obj):
2199 try:
2200 unlink(obj, lstatobj)
2201 except EnvironmentError, e:
2202 if e.errno not in ignored_unlink_errnos:
2203 raise
2204 del e
2205 show_unmerge("<<<", "", file_type, obj)
2206 continue
2208 lmtime = str(lstatobj[stat.ST_MTIME])
2209 if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
2210 show_unmerge("---", "!mtime", file_type, obj)
2211 continue
2213 if pkgfiles[objkey][0] == "dir":
2214 if statobj is None or not stat.S_ISDIR(statobj.st_mode):
2215 show_unmerge("---", "!dir", file_type, obj)
2216 continue
2217 mydirs.append(obj)
2218 elif pkgfiles[objkey][0] == "sym":
2219 if not islink:
2220 show_unmerge("---", "!sym", file_type, obj)
2221 continue
2222 # Go ahead and unlink symlinks to directories here when
2223 # they're actually recorded as symlinks in the contents.
2224 # Normally, symlinks such as /lib -> lib64 are not recorded
2225 # as symlinks in the contents of a package. If a package
2226 # installs something into ${D}/lib/, it is recorded in the
2227 # contents as a directory even if it happens to correspond
2228 # to a symlink when it's merged to the live filesystem.
2229 try:
2230 unlink(obj, lstatobj)
2231 show_unmerge("<<<", "", file_type, obj)
2232 except (OSError, IOError),e:
2233 if e.errno not in ignored_unlink_errnos:
2234 raise
2235 del e
2236 show_unmerge("!!!", "", file_type, obj)
2237 elif pkgfiles[objkey][0] == "obj":
2238 if statobj is None or not stat.S_ISREG(statobj.st_mode):
2239 show_unmerge("---", "!obj", file_type, obj)
2240 continue
2241 mymd5 = None
2242 try:
2243 mymd5 = perform_md5(obj, calc_prelink=1)
2244 except FileNotFound, e:
2245 # the file has disappeared between now and our stat call
2246 show_unmerge("---", "!obj", file_type, obj)
2247 continue
2249 # string.lower is needed because db entries used to be in upper-case. The
2250 # string.lower allows for backwards compatibility.
2251 if mymd5 != pkgfiles[objkey][2].lower():
2252 show_unmerge("---", "!md5", file_type, obj)
2253 continue
2254 try:
2255 unlink(obj, lstatobj)
2256 except (OSError, IOError), e:
2257 if e.errno not in ignored_unlink_errnos:
2258 raise
2259 del e
2260 show_unmerge("<<<", "", file_type, obj)
2261 elif pkgfiles[objkey][0] == "fif":
2262 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
2263 show_unmerge("---", "!fif", file_type, obj)
2264 continue
2265 show_unmerge("---", "", file_type, obj)
2266 elif pkgfiles[objkey][0] == "dev":
2267 show_unmerge("---", "", file_type, obj)
2269 mydirs.sort()
2270 mydirs.reverse()
2272 for obj in mydirs:
2273 try:
2274 if bsd_chflags:
2275 lstatobj = os.lstat(obj)
2276 if lstatobj.st_flags != 0:
2277 bsd_chflags.lchflags(obj, 0)
2278 parent_name = os.path.dirname(obj)
2279 # Use normal stat/chflags for the parent since we want to
2280 # follow any symlinks to the real parent directory.
2281 pflags = os.stat(parent_name).st_flags
2282 if pflags != 0:
2283 bsd_chflags.chflags(parent_name, 0)
2284 try:
2285 os.rmdir(obj)
2286 finally:
2287 if bsd_chflags and pflags != 0:
2288 # Restore the parent flags we saved before unlinking
2289 bsd_chflags.chflags(parent_name, pflags)
2290 show_unmerge("<<<", "", "dir", obj)
2291 except EnvironmentError, e:
2292 if e.errno not in ignored_rmdir_errnos:
2293 raise
2294 if e.errno != errno.ENOENT:
2295 show_unmerge("---", "!empty", "dir", obj)
2296 del e
2298 # Remove stale entries from config memory.
2299 if stale_confmem:
2300 for filename in stale_confmem:
2301 del cfgfiledict[filename]
2302 writedict(cfgfiledict, conf_mem_file)
2304 #remove self from vartree database so that our own virtual gets zapped if we're the last node
2305 self.vartree.zap(self.mycpv)
2307 def isowner(self, filename, destroot):
2308 """
2309 Check if a file belongs to this package. This may
2310 result in a stat call for the parent directory of
2311 every installed file, since the inode numbers are
2312 used to work around the problem of ambiguous paths
2313 caused by symlinked directories. The results of
2314 stat calls are cached to optimize multiple calls
2315 to this method.
2317 @param filename:
2318 @type filename:
2319 @param destroot:
2320 @type destroot:
2321 @rtype: Boolean
2322 @returns:
2323 1. True if this package owns the file.
2324 2. False if this package does not own the file.
2325 """
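# Illustrative: isowner("usr/bin/foo", "/") is True only if some
# CONTENTS entry resolves to /usr/bin/foo, even when the parent
# directory is reached through a symlinked path.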
2326 return bool(self._match_contents(filename, destroot))
2328 def _match_contents(self, filename, destroot):
2329 """
2330 The matching contents entry is returned, which is useful
2331 since the path may differ from the one given by the caller,
2332 due to symlinks.
2334 @rtype: String
2335 @return: the contents entry corresponding to the given path, or False
2336 if the file is not owned by this package.
2337 """
2339 destfile = normalize_path(
2340 os.path.join(destroot, filename.lstrip(os.path.sep)))
2342 pkgfiles = self.getcontents()
2343 if pkgfiles and destfile in pkgfiles:
2344 return destfile
2345 if pkgfiles:
2346 basename = os.path.basename(destfile)
2347 if self._contents_basenames is None:
2348 self._contents_basenames = set(
2349 os.path.basename(x) for x in pkgfiles)
2350 if basename not in self._contents_basenames:
2351 # This is a shortcut that, in most cases, allows us to
2352 # eliminate this package as an owner without the need
2353 # to examine inode numbers of parent directories.
2354 return False
2356 # Use stat rather than lstat since we want to follow
2357 # any symlinks to the real parent directory.
2358 parent_path = os.path.dirname(destfile)
2359 try:
2360 parent_stat = os.stat(parent_path)
2361 except EnvironmentError, e:
2362 if e.errno != errno.ENOENT:
2363 raise
2364 del e
2365 return False
2366 if self._contents_inodes is None:
2367 self._contents_inodes = {}
2368 parent_paths = set()
2369 for x in pkgfiles:
2370 p_path = os.path.dirname(x)
2371 if p_path in parent_paths:
2372 continue
2373 parent_paths.add(p_path)
2374 try:
2375 s = os.stat(p_path)
2376 except OSError:
2377 pass
2378 else:
2379 inode_key = (s.st_dev, s.st_ino)
2380 # Use lists of paths in case multiple
2381 # paths reference the same inode.
2382 p_path_list = self._contents_inodes.get(inode_key)
2383 if p_path_list is None:
2384 p_path_list = []
2385 self._contents_inodes[inode_key] = p_path_list
2386 if p_path not in p_path_list:
2387 p_path_list.append(p_path)
2388 p_path_list = self._contents_inodes.get(
2389 (parent_stat.st_dev, parent_stat.st_ino))
2390 if p_path_list:
2391 for p_path in p_path_list:
2392 x = os.path.join(p_path, basename)
2393 if x in pkgfiles:
2394 return x
2396 return False
2398 def _preserve_libs(self, srcroot, destroot, mycontents, counter, inforoot):
2399 showMessage = self._display_merge
2400 # read global reverse NEEDED map
2401 linkmap = self.vartree.dbapi.linkmap
2402 linkmap.rebuild(include_file=os.path.join(inforoot, "NEEDED.ELF.2"))
2403 liblist = linkmap.listLibraryObjects()
2405 # get list of libraries from old package instance
2406 root_len = len(self.myroot) - 1
2407 old_contents = set(p[root_len:] \
2408 for p in self._installed_instance.getcontents())
2409 old_libs = old_contents.intersection(liblist)
2411 # get list of libraries from new package instance
2412 mylibs = set([os.path.join(os.sep, x) for x in mycontents]).intersection(liblist)
2414 # check which libs are present in the old, but not the new package instance
2415 candidates = old_libs.difference(mylibs)
2417 for x in old_contents:
2418 if os.path.islink(x) and os.path.realpath(x) in candidates and x not in mycontents:
2419 candidates.add(x)
2421 provider_cache = {}
2422 consumer_cache = {}
2424 # ignore any libs that are only internally used by the package
2425 def has_external_consumers(lib, contents, otherlibs):
2426 consumers = consumer_cache.get(lib)
2427 if consumers is None:
2428 consumers = linkmap.findConsumers(lib)
2429 consumer_cache[lib] = consumers
2430 contents_without_libs = [x for x in contents if x not in otherlibs]
2432 # just used by objects that will be autocleaned
2433 if len(consumers.difference(contents_without_libs)) == 0:
2434 return False
2435 # used by objects that are referenced as well, need to check those
2436 # recursively to break any reference cycles
2437 elif len(consumers.difference(contents)) == 0:
2438 otherlibs = set(otherlibs)
2439 for ol in otherlibs.intersection(consumers):
2440 if has_external_consumers(ol, contents, otherlibs.difference([lib])):
2441 return True
2442 return False
2443 # used by external objects directly
2444 else:
2445 return True
2447 for lib in list(candidates):
2448 if not has_external_consumers(lib, old_contents, candidates):
2449 candidates.remove(lib)
2450 continue
2451 if linkmap.isMasterLink(lib):
2452 candidates.remove(lib)
2453 continue
2454 # only preserve the lib if there is no other copy to use for each consumer
2455 keep = False
2457 lib_consumers = consumer_cache.get(lib)
2458 if lib_consumers is None:
2459 lib_consumers = linkmap.findConsumers(lib)
2460 consumer_cache[lib] = lib_consumers
2462 for c in lib_consumers:
2463 localkeep = True
2464 providers = provider_cache.get(c)
2465 if providers is None:
2466 providers = linkmap.findProviders(c)
2467 provider_cache[c] = providers
2469 for soname in providers:
2470 if lib in providers[soname]:
2471 for p in providers[soname]:
2472 if p not in candidates or os.path.exists(os.path.join(srcroot, p.lstrip(os.sep))):
2473 localkeep = False
2474 break
2475 break
2476 if localkeep:
2477 keep = True
2478 if not keep:
2479 candidates.remove(lib)
2480 continue
2482 del mylibs, mycontents, old_contents, liblist
2484 # inject files that should be preserved into our image dir
2485 import shutil
2486 preserve_paths = []
2487 candidates_stack = list(candidates)
2488 while candidates_stack:
2489 x = candidates_stack.pop()
2490 # skip existing files so the 'new' libs aren't overwritten
2491 if os.path.exists(os.path.join(srcroot, x.lstrip(os.sep))):
2492 continue
2493 showMessage("injecting %s into %s\n" % (x, srcroot),
2494 noiselevel=-1)
2495 if not os.path.exists(os.path.join(destroot, x.lstrip(os.sep))):
2496 showMessage("%s does not exist so can't be preserved\n" % x,
2497 noiselevel=-1)
2498 continue
2499 mydir = os.path.join(srcroot, os.path.dirname(x).lstrip(os.sep))
2500 if not os.path.exists(mydir):
2501 os.makedirs(mydir)
2503 # resolve symlinks and extend preserve list
2504 # NOTE: we're extending the list in the loop to emulate recursion to
2505 # also get indirect symlinks
2506 if os.path.islink(x):
2507 linktarget = os.readlink(x)
2508 os.symlink(linktarget, os.path.join(srcroot, x.lstrip(os.sep)))
2509 if linktarget[0] != os.sep:
2510 linktarget = os.path.join(os.path.dirname(x), linktarget)
2511 if linktarget not in candidates:
2512 candidates.add(linktarget)
2513 candidates_stack.append(linktarget)
2514 else:
2515 shutil.copy2(os.path.join(destroot, x.lstrip(os.sep)),
2516 os.path.join(srcroot, x.lstrip(os.sep)))
2517 preserve_paths.append(x)
2519 del candidates
2521 # keep track of the libs we preserved
2522 self.vartree.dbapi.plib_registry.register(self.mycpv, self.settings["SLOT"], counter, preserve_paths)
2524 del preserve_paths
2526 def _collision_protect(self, srcroot, destroot, mypkglist, mycontents):
2527 collision_ignore = set([normalize_path(myignore) for myignore in \
2528 shlex.split(self.settings.get("COLLISION_IGNORE", ""))])
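# COLLISION_IGNORE is a space-separated (shlex-split) list of paths;
# e.g. with COLLISION_IGNORE="/lib/modules /usr/share/doc", collisions
# on those files, or anything beneath those directories, do not stop
# the merge (see the prefix check below).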
2530 showMessage = self._display_merge
2531 scheduler = self._scheduler
2532 stopmerge = False
2533 collisions = []
2534 destroot = normalize_path(destroot).rstrip(os.path.sep) + \
2535 os.path.sep
2536 showMessage("%s checking %d files for package collisions\n" % \
2537 (green("*"), len(mycontents)))
2538 for i, f in enumerate(mycontents):
2539 if i % 1000 == 0 and i != 0:
2540 showMessage("%d files checked ...\n" % i)
2542 if scheduler is not None and \
2543 0 == i % self._file_merge_yield_interval:
2544 scheduler.scheduleYield()
2546 dest_path = normalize_path(
2547 os.path.join(destroot, f.lstrip(os.path.sep)))
2548 try:
2549 dest_lstat = os.lstat(dest_path)
2550 except EnvironmentError, e:
2551 if e.errno == errno.ENOENT:
2552 del e
2553 continue
2554 elif e.errno == errno.ENOTDIR:
2555 del e
2556 # A non-directory is in a location where this package
2557 # expects to have a directory.
2558 dest_lstat = None
2559 parent_path = dest_path
2560 while len(parent_path) > len(destroot):
2561 parent_path = os.path.dirname(parent_path)
2562 try:
2563 dest_lstat = os.lstat(parent_path)
2564 break
2565 except EnvironmentError, e:
2566 if e.errno != errno.ENOTDIR:
2567 raise
2568 del e
2569 if not dest_lstat:
2570 raise AssertionError(
2571 "unable to find non-directory " + \
2572 "parent for '%s'" % dest_path)
2573 dest_path = parent_path
2574 f = os.path.sep + dest_path[len(destroot):]
2575 if f in collisions:
2576 continue
2577 else:
2578 raise
2579 if f[0] != "/":
2580 f="/"+f
2581 isowned = False
2582 for ver in [self] + mypkglist:
2583 if (ver.isowner(f, destroot) or ver.isprotected(f)):
2584 isowned = True
2585 break
2586 if not isowned:
2587 stopmerge = True
2588 if collision_ignore:
2589 if f in collision_ignore:
2590 stopmerge = False
2591 else:
2592 for myignore in collision_ignore:
2593 if f.startswith(myignore + os.path.sep):
2594 stopmerge = False
2595 break
2596 if stopmerge:
2597 collisions.append(f)
2598 return collisions
2600 def _security_check(self, installed_instances):
2601 if not installed_instances:
2602 return 0
2604 showMessage = self._display_merge
2605 scheduler = self._scheduler
2607 file_paths = set()
2608 for dblnk in installed_instances:
2609 file_paths.update(dblnk.getcontents())
2610 inode_map = {}
2611 real_paths = set()
2612 for i, path in enumerate(file_paths):
2614 if scheduler is not None and \
2615 0 == i % self._file_merge_yield_interval:
2616 scheduler.scheduleYield()
2618 try:
2619 s = os.lstat(path)
2620 except OSError, e:
2621 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
2622 raise
2623 del e
2624 continue
2625 if not stat.S_ISREG(s.st_mode):
2626 continue
2627 path = os.path.realpath(path)
2628 if path in real_paths:
2629 continue
2630 real_paths.add(path)
2631 if s.st_nlink > 1 and \
2632 s.st_mode & (stat.S_ISUID | stat.S_ISGID):
2633 k = (s.st_dev, s.st_ino)
2634 inode_map.setdefault(k, []).append((path, s))
2635 suspicious_hardlinks = []
2636 for path_list in inode_map.itervalues():
2637 path, s = path_list[0]
2638 if len(path_list) == s.st_nlink:
2639 # All hardlinks seem to be owned by this package.
2640 continue
2641 suspicious_hardlinks.append(path_list)
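# Fewer recorded paths than st_nlink means at least one hardlink to
# this suid/sgid inode exists outside the contents of these packages.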
2642 if not suspicious_hardlinks:
2643 return 0
2645 msg = []
2646 msg.append("suid/sgid file(s) " + \
2647 "with suspicious hardlink(s):")
2648 msg.append("")
2649 for path_list in suspicious_hardlinks:
2650 for path, s in path_list:
2651 msg.append("\t%s" % path)
2652 msg.append("")
2653 msg.append("See the Gentoo Security Handbook " + \
2654 "guide for advice on how to proceed.")
2656 self._eerror("preinst", msg)
2658 return 1
2660 def _eerror(self, phase, lines):
2661 from portage.elog.messages import eerror as _eerror
2662 if self._scheduler is None:
2663 for l in lines:
2664 _eerror(l, phase=phase, key=self.settings.mycpv)
2665 else:
2666 self._scheduler.dblinkElog(self,
2667 phase, _eerror, lines)
2669 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
2670 mydbapi=None, prev_mtimes=None):
2671 """
2673 This function does the following:
2675 calls self._preserve_libs if FEATURES=preserve-libs
2676 calls self._collision_protect if FEATURES=collision-protect
2677 calls doebuild(mydo=pkg_preinst)
2678 Merges the package to the livefs
2679 unmerges old version (if required)
2680 calls doebuild(mydo=pkg_postinst)
2681 calls env_update
2682 calls elog_process
2684 @param srcroot: Typically this is ${D}
2685 @type srcroot: String (Path)
2686 @param destroot: Path to merge to (usually ${ROOT})
2687 @type destroot: String (Path)
2688 @param inforoot: root of the vardb entry ?
2689 @type inforoot: String (Path)
2690 @param myebuild: path to the ebuild that we are processing
2691 @type myebuild: String (Path)
2692 @param mydbapi: dbapi which is handed to doebuild.
2693 @type mydbapi: portdbapi instance
2694 @param prev_mtimes: { Filename:mtime } mapping for env_update
2695 @type prev_mtimes: Dictionary
2696 @rtype: Boolean
2697 @returns:
2698 1. 0 on success
2699 2. 1 on failure
2701 secondhand is a list of symlinks that have been skipped due to their target
2702 not existing; we will merge these symlinks at a later time.
2703 """
2705 showMessage = self._display_merge
2706 scheduler = self._scheduler
2708 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
2709 destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep
2711 if not os.path.isdir(srcroot):
2712 showMessage("!!! Directory Not Found: D='%s'\n" % srcroot,
2713 level=logging.ERROR, noiselevel=-1)
2714 return 1
2716 inforoot_slot_file = os.path.join(inforoot, "SLOT")
2717 slot = None
2718 try:
2719 f = open(inforoot_slot_file)
2720 try:
2721 slot = f.read().strip()
2722 finally:
2723 f.close()
2724 except EnvironmentError, e:
2725 if e.errno != errno.ENOENT:
2726 raise
2727 del e
2729 if slot is None:
2730 slot = ""
2732 def eerror(lines):
2733 self._eerror("preinst", lines)
2735 if slot != self.settings["SLOT"]:
2736 showMessage("!!! WARNING: Expected SLOT='%s', got '%s'\n" % \
2737 (self.settings["SLOT"], slot), level=logging.WARN)
2739 if not os.path.exists(self.dbcatdir):
2740 os.makedirs(self.dbcatdir)
2742 otherversions = []
2743 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
2744 otherversions.append(v.split("/")[1])
2746 # filter any old-style virtual matches
2747 slot_matches = [cpv for cpv in self.vartree.dbapi.match(
2748 "%s:%s" % (cpv_getkey(self.mycpv), slot)) \
2749 if cpv_getkey(cpv) == cpv_getkey(self.mycpv)]
2751 if self.mycpv not in slot_matches and \
2752 self.vartree.dbapi.cpv_exists(self.mycpv):
2753 # handle multislot or unapplied slotmove
2754 slot_matches.append(self.mycpv)
2756 others_in_slot = []
2757 from portage import config
2758 for cur_cpv in slot_matches:
2759 # Clone the config in case one of these has to be unmerged since
2760 # we need it to have private ${T} etc... for things like elog.
2761 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2762 self.vartree.root, config(clone=self.settings),
2763 vartree=self.vartree, treetype="vartree",
2764 scheduler=self._scheduler))
2766 retval = self._security_check(others_in_slot)
2767 if retval:
2768 return retval
2770 if slot_matches:
2771 # Used by self.isprotected().
2772 max_dblnk = None
2773 max_counter = -1
2774 for dblnk in others_in_slot:
2775 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
2776 if cur_counter > max_counter:
2777 max_counter = cur_counter
2778 max_dblnk = dblnk
2779 self._installed_instance = max_dblnk
2781 # get current counter value (counter_tick also takes care of incrementing it)
2782 # XXX Need to make this destroot, but it needs to be initialized first. XXX
2783 # XXX bis: leads to some invalidentry() call through cp_all().
2784 # Note: The counter is generated here but written later because preserve_libs
2785 # needs the counter value but has to be before dbtmpdir is made (which
2786 # has to be before the counter is written) - genone
2787 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
2789 # Save this for unregistering preserved-libs if the merge fails.
2790 self.settings["COUNTER"] = str(counter)
2791 self.settings.backup_changes("COUNTER")
2793 myfilelist = []
2794 mylinklist = []
2795 def onerror(e):
2796 raise
2797 for parent, dirs, files in os.walk(srcroot, onerror=onerror):
2798 for f in files:
2799 file_path = os.path.join(parent, f)
2800 file_mode = os.lstat(file_path).st_mode
2801 if stat.S_ISREG(file_mode):
2802 myfilelist.append(file_path[len(srcroot):])
2803 elif stat.S_ISLNK(file_mode):
2804 # Note: os.walk puts symlinks to directories in the "dirs"
2805 # list and it does not traverse them since that could lead
2806 # to an infinite recursion loop.
2807 mylinklist.append(file_path[len(srcroot):])
2809 # If there are no files to merge, and an installed package in the same
2810 # slot has files, it probably means that something went wrong.
2811 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
2812 not myfilelist and not mylinklist and others_in_slot:
2813 installed_files = None
2814 for other_dblink in others_in_slot:
2815 installed_files = other_dblink.getcontents()
2816 if not installed_files:
2817 continue
2818 from textwrap import wrap
2819 wrap_width = 72
2820 msg = []
2821 d = (
2822 self.mycpv,
2823 other_dblink.mycpv
2824 )
2825 msg.extend(wrap(("The '%s' package will not install " + \
2826 "any files, but the currently installed '%s'" + \
2827 " package has the following files: ") % d, wrap_width))
2828 msg.append("")
2829 msg.extend(sorted(installed_files))
2830 msg.append("")
2831 msg.append("package %s NOT merged" % self.mycpv)
2832 msg.append("")
2833 msg.extend(wrap(
2834 ("Manually run `emerge --unmerge =%s` " % \
2835 other_dblink.mycpv) + "if you really want to " + \
2836 "remove the above files. Set " + \
2837 "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in " + \
2838 "/etc/make.conf if you do not want to " + \
2839 "abort in cases like this.",
2840 wrap_width))
2841 eerror(msg)
2842 if installed_files:
2843 return 1
2845 # Preserve old libs if they are still in use
2846 if slot_matches and "preserve-libs" in self.settings.features:
2847 self._preserve_libs(srcroot, destroot, myfilelist+mylinklist, counter, inforoot)
2849 # check for package collisions
2850 blockers = None
2851 if self._blockers is not None:
2852 # This is only supposed to be called when
2853 # the vdb is locked, like it is here.
2854 blockers = self._blockers()
2855 if blockers is None:
2856 blockers = []
2857 collisions = self._collision_protect(srcroot, destroot,
2858 others_in_slot + blockers, myfilelist + mylinklist)
2860 # Make sure the ebuild environment is initialized and that ${T}/elog
2861 # exists for logging of collision-protect eerror messages.
2862 if myebuild is None:
2863 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
2864 doebuild_environment(myebuild, "preinst", destroot,
2865 self.settings, 0, 0, mydbapi)
2866 prepare_build_dirs(destroot, self.settings, cleanup)
2868 if collisions:
2869 collision_protect = "collision-protect" in self.settings.features
2870 msg = "This package will overwrite one or more files that" + \
2871 " may belong to other packages (see list below)."
2872 if not collision_protect:
2873 msg += " Add \"collision-protect\" to FEATURES in" + \
2874 " make.conf if you would like the merge to abort" + \
2875 " in cases like this."
2876 if self.settings.get("PORTAGE_QUIET") != "1":
2877 msg += " You can use a command such as" + \
2878 " `portageq owners / <filename>` to identify the" + \
2879 " installed package that owns a file. If portageq" + \
2880 " reports that only one package owns a file then do NOT" + \
2881 " file a bug report. A bug report is only useful if it" + \
2882 " identifies at least two or more packages that are known" + \
2883 " to install the same file(s)." + \
2884 " If a collision occurs and you" + \
2885 " can not explain where the file came from then you" + \
2886 " should simply ignore the collision since there is not" + \
2887 " enough information to determine if a real problem" + \
2888 " exists. Please do NOT file a bug report at" + \
2889 " http://bugs.gentoo.org unless you report exactly which" + \
2890 " two packages install the same file(s). Once again," + \
2891 " please do NOT file a bug report unless you have" + \
2892 " completely understood the above message."
2894 self.settings["EBUILD_PHASE"] = "preinst"
2895 from textwrap import wrap
2896 msg = wrap(msg, 70)
2897 if collision_protect:
2898 msg.append("")
2899 msg.append("package %s NOT merged" % self.settings.mycpv)
2900 msg.append("")
2901 msg.append("Detected file collision(s):")
2902 msg.append("")
2904 for f in collisions:
2905 msg.append("\t%s" % \
2906 os.path.join(destroot, f.lstrip(os.path.sep)))
2908 eerror(msg)
2910 msg = []
2911 msg.append("")
2912 msg.append("Searching all installed" + \
2913 " packages for file collisions...")
2914 msg.append("")
2915 msg.append("Press Ctrl-C to Stop")
2916 msg.append("")
2917 eerror(msg)
2919 owners = self.vartree.dbapi._owners.get_owners(collisions)
2920 self.vartree.dbapi.flush_cache()
2922 for pkg, owned_files in owners.iteritems():
2923 cpv = pkg.mycpv
2924 msg = []
2925 msg.append("%s" % cpv)
2926 for f in sorted(owned_files):
2927 msg.append("\t%s" % os.path.join(destroot,
2928 f.lstrip(os.path.sep)))
2929 msg.append("")
2930 eerror(msg)
2932 if not owners:
2933 eerror(["None of the installed" + \
2934 " packages claim the file(s).", ""])
2936 # The explanation about the collision and how to solve
2937 # it may not be visible via a scrollback buffer, especially
2938 # if the number of file collisions is large. Therefore,
2939 # show a summary at the end.
2940 if collision_protect:
2941 msg = "Package '%s' NOT merged due to file collisions." % \
2942 self.settings.mycpv
2943 else:
2944 msg = "Package '%s' merged despite file collisions." % \
2945 self.settings.mycpv
2946 msg += " If necessary, refer to your elog " + \
2947 "messages for the whole content of the above message."
2948 eerror(wrap(msg, 70))
2950 if collision_protect:
2951 return 1
2953 # The merge process may move files out of the image directory,
2954 # which causes invalidation of the .installed flag.
2955 try:
2956 os.unlink(os.path.join(
2957 os.path.dirname(normalize_path(srcroot)), ".installed"))
2958 except OSError, e:
2959 if e.errno != errno.ENOENT:
2960 raise
2961 del e
2963 self.dbdir = self.dbtmpdir
2964 self.delete()
2965 ensure_dirs(self.dbtmpdir)
2967 # run preinst script
2968 if scheduler is None:
2969 showMessage(">>> Merging %s to %s\n" % (self.mycpv, destroot))
2970 a = doebuild(myebuild, "preinst", destroot, self.settings,
2971 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
2972 vartree=self.vartree)
2973 else:
2974 a = scheduler.dblinkEbuildPhase(
2975 self, mydbapi, myebuild, "preinst")
2977 # XXX: Decide how to handle failures here.
2978 if a != os.EX_OK:
2979 showMessage("!!! FAILED preinst: "+str(a)+"\n",
2980 level=logging.ERROR, noiselevel=-1)
2981 return a
2983 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
2984 for x in os.listdir(inforoot):
2985 self.copyfile(inforoot+"/"+x)
2987 # write local package counter for recording
2988 lcfile = open(os.path.join(self.dbtmpdir, "COUNTER"),"w")
2989 lcfile.write(str(counter))
2990 lcfile.close()
2992 # open CONTENTS file (possibly overwriting old one) for recording
2993 outfile = open(os.path.join(self.dbtmpdir, "CONTENTS"),"w")
2995 self.updateprotect()
2997 #if we have a file containing previously-merged config file md5sums, grab it.
2998 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
2999 cfgfiledict = grabdict(conf_mem_file)
3000 if "NOCONFMEM" in self.settings:
3001 cfgfiledict["IGNORE"]=1
3002 else:
3003 cfgfiledict["IGNORE"]=0
3005 # Always behave like --noconfmem is enabled for downgrades
3006 # so that people who don't know about this option are less
3007 # likely to get confused when doing upgrade/downgrade cycles.
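# (pkgcmp() returning a negative value below means the version being
# merged is older than an installed instance in this slot, i.e. this
# merge is a downgrade.)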
3008 pv_split = catpkgsplit(self.mycpv)[1:]
3009 for other in others_in_slot:
3010 if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
3011 cfgfiledict["IGNORE"] = 1
3012 break
3014 # Don't bump mtimes on merge since some applications require
3015 # preservation of timestamps. This means that the unmerge phase must
3016 # check to see if file belongs to an installed instance in the same
3017 # slot.
3018 mymtime = None
3020 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
3021 prevmask = os.umask(0)
3022 secondhand = []
3024 # we do a first merge; this will recurse through all files in our srcroot but also build up a
3025 # "second hand" of symlinks to merge later
3026 if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
3027 return 1
3029 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
3030 # broken symlinks. We'll merge them too.
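# The while loop below stops as soon as a full pass merges nothing new
# (len(secondhand) stops shrinking); any survivors are then
# force-merged as broken or circular symlinks.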
3031 lastlen = 0
3032 while len(secondhand) and len(secondhand)!=lastlen:
3033 # clear the thirdhand. Anything from our second hand that
3034 # couldn't get merged will be added to thirdhand.
3036 thirdhand = []
3037 self.mergeme(srcroot, destroot, outfile, thirdhand, secondhand, cfgfiledict, mymtime)
3039 #swap hands
3040 lastlen = len(secondhand)
3042 # our thirdhand now becomes our secondhand. It's ok to throw
3043 # away secondhand since thirdhand contains all the stuff that
3044 # couldn't be merged.
3045 secondhand = thirdhand
3047 if len(secondhand):
3048 # force merge of remaining symlinks (broken or circular; oh well)
3049 self.mergeme(srcroot, destroot, outfile, None, secondhand, cfgfiledict, mymtime)
3051 #restore umask
3052 os.umask(prevmask)
3054 #if we opened it, close it
3055 outfile.flush()
3056 outfile.close()
3058 # write out our collection of md5sums
3059 cfgfiledict.pop("IGNORE", None)
3060 ensure_dirs(os.path.dirname(conf_mem_file),
3061 gid=portage_gid, mode=02750, mask=02)
3062 writedict(cfgfiledict, conf_mem_file)
3064 # These caches are populated during collision-protect and the data
3065 # they contain is now invalid. It's very important to invalidate
3066 # the contents_inodes cache so that FEATURES=unmerge-orphans
3067 # doesn't unmerge anything that belongs to this package that has
3068 # just been merged.
3069 others_in_slot.append(self) # self has just been merged
3070 for dblnk in others_in_slot:
3071 dblnk.contentscache = None
3072 dblnk._contents_inodes = None
3073 dblnk._contents_basenames = None
3075 # If portage is reinstalling itself, remove the old
3076 # version now since we want to use the temporary
3077 # PORTAGE_BIN_PATH that will be removed when we return.
3078 reinstall_self = False
3079 if self.myroot == "/" and \
3080 "sys-apps" == self.cat and \
3081 "portage" == pkgsplit(self.pkg)[0]:
3082 reinstall_self = True
3084 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes"
3085 for dblnk in list(others_in_slot):
3086 if dblnk is self:
3087 continue
3088 if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
3089 continue
3090 showMessage(">>> Safely unmerging already-installed instance...\n")
3091 others_in_slot.remove(dblnk) # dblnk will unmerge itself now
3092 dblnk.unmerge(trimworld=0, ldpath_mtimes=prev_mtimes,
3093 others_in_slot=others_in_slot)
3094 # TODO: Check status and abort if necessary.
3095 dblnk.delete()
3096 showMessage(">>> Original instance of package unmerged safely.\n")
3098 if len(others_in_slot) > 1:
3099 from portage.output import colorize
3100 showMessage(colorize("WARN", "WARNING:")
3101 + " AUTOCLEAN is disabled. This can cause serious"
3102 + " problems due to overlapping packages.\n",
3103 level=logging.WARN, noiselevel=-1)
3105 # We hold both directory locks.
3106 self.dbdir = self.dbpkgdir
3107 self.delete()
3108 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
3110 # Check for file collisions with blocking packages
3111 # and remove any colliding files from their CONTENTS
3112 # since they now belong to this package.
3113 self._clear_contents_cache()
3114 contents = self.getcontents()
3115 destroot_len = len(destroot) - 1
3116 for blocker in blockers:
3117 self.vartree.dbapi.removeFromContents(blocker, iter(contents),
3118 relative_paths=False)
3120 self.vartree.dbapi._add(self)
3121 contents = self.getcontents()
3123 # regenerate reverse NEEDED map
3124 self.vartree.dbapi.linkmap.rebuild()
3126 #do postinst script
3127 self.settings["PORTAGE_UPDATE_ENV"] = \
3128 os.path.join(self.dbpkgdir, "environment.bz2")
3129 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
3130 try:
3131 if scheduler is None:
3132 a = doebuild(myebuild, "postinst", destroot, self.settings,
3133 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
3134 vartree=self.vartree)
3135 if a == os.EX_OK:
3136 showMessage(">>> %s %s\n" % (self.mycpv, "merged."))
3137 else:
3138 a = scheduler.dblinkEbuildPhase(
3139 self, mydbapi, myebuild, "postinst")
3140 finally:
3141 self.settings.pop("PORTAGE_UPDATE_ENV", None)
3143 # XXX: Decide how to handle failures here.
3144 if a != os.EX_OK:
3145 showMessage("!!! FAILED postinst: "+str(a)+"\n",
3146 level=logging.ERROR, noiselevel=-1)
3147 return a
3149 downgrade = False
3150 for v in otherversions:
3151 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
3152 downgrade = True
3154 #update environment settings, library paths. DO NOT change symlinks.
3155 env_update(makelinks=(not downgrade),
3156 target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
3157 contents=contents, env=self.settings.environ(),
3158 writemsg_level=self._display_merge)
3160 return os.EX_OK
3162 def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
3163 """
3165 This function handles actual merging of the package contents to the livefs.
3166 It also handles config protection.
3168 @param srcroot: Where are we copying files from (usually ${D})
3169 @type srcroot: String (Path)
3170 @param destroot: Typically ${ROOT}
3171 @type destroot: String (Path)
3172 @param outfile: File to log operations to
3173 @type outfile: File Object
3174 @param secondhand: A set of items to merge in pass two (usually
3175 symlinks that point to non-existing files that may get merged later)
3176 @type secondhand: List
3177 @param stufftomerge: Either a directory to merge, or a list of items.
3178 @type stufftomerge: String or List
3179 @param cfgfiledict: { File:mtime } mapping for config_protected files
3180 @type cfgfiledict: Dictionary
3181 @param thismtime: The current time (typically long(time.time()))
3182 @type thismtime: Long
3183 @rtype: None or Boolean
3184 @returns:
3185 1. True on failure
3186 2. None otherwise
3187 """
3190 showMessage = self._display_merge
3191 scheduler = self._scheduler
3193 from os.path import sep, join
3194 srcroot = normalize_path(srcroot).rstrip(sep) + sep
3195 destroot = normalize_path(destroot).rstrip(sep) + sep
3197 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
3198 if isinstance(stufftomerge, basestring):
3199 #A directory is specified. Figure out protection paths, listdir() it and process it.
3200 mergelist = os.listdir(join(srcroot, stufftomerge))
3201 offset = stufftomerge
3202 else:
3203 mergelist = stufftomerge
3204 offset = ""
3206 for i, x in enumerate(mergelist):
3208 if scheduler is not None and \
3209 0 == i % self._file_merge_yield_interval:
3210 scheduler.scheduleYield()
3212 mysrc = join(srcroot, offset, x)
3213 mydest = join(destroot, offset, x)
3214 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
3215 myrealdest = join(sep, offset, x)
3216 # stat file once, test using S_* macros many times (faster that way)
3217 try:
3218 mystat = os.lstat(mysrc)
3219 except OSError, e:
3220 writemsg("\n")
3221 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
3222 writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
3223 writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
3224 writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
3225 writemsg(red("!!! File: ")+str(mysrc)+"\n", noiselevel=-1)
3226 writemsg(red("!!! Error: ")+str(e)+"\n", noiselevel=-1)
3227 sys.exit(1)
3228 except Exception, e:
3229 writemsg("\n")
3230 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
3231 writemsg(red("!!! A stat call returned the following error for the following file:")+"\n")
3232 writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
3233 writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
3234 writemsg( "!!! File: "+str(mysrc)+"\n", noiselevel=-1)
3235 writemsg( "!!! Error: "+str(e)+"\n", noiselevel=-1)
3236 sys.exit(1)
3239 mymode = mystat[stat.ST_MODE]
3240 # handy variables; mydest is the target object on the live filesystems;
3241 # mysrc is the source object in the temporary install dir
3242 try:
3243 mydstat = os.lstat(mydest)
3244 mydmode = mydstat.st_mode
3245 except OSError, e:
3246 if e.errno != errno.ENOENT:
3247 raise
3248 del e
3249 #dest file doesn't exist
3250 mydstat = None
3251 mydmode = None
3253 if stat.S_ISLNK(mymode):
3254 # we are merging a symbolic link
3255 myabsto = abssymlink(mysrc)
3256 if myabsto.startswith(srcroot):
3257 myabsto = myabsto[len(srcroot):]
3258 myabsto = myabsto.lstrip(sep)
3259 myto = os.readlink(mysrc)
3260 if self.settings and self.settings["D"]:
3261 if myto.startswith(self.settings["D"]):
3262 myto = myto[len(self.settings["D"]):]
3263 # myrealto contains the path of the real file to which this symlink points.
3264 # we can simply test for existence of this file to see if the target has been merged yet
3265 myrealto = normalize_path(os.path.join(destroot, myabsto))
3266 if mydmode!=None:
3267 #destination exists
3268 if not stat.S_ISLNK(mydmode):
3269 if stat.S_ISDIR(mydmode):
3270 # directory in the way: we can't merge a symlink over a directory
3271 # we won't merge this, continue with next file...
3272 continue
3274 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
3275 # Kill file blocking installation of symlink to dir #71787
3276 pass
3277 elif self.isprotected(mydest):
3278 # Use md5 of the target in ${D} if it exists...
3279 try:
3280 newmd5 = perform_md5(join(srcroot, myabsto))
3281 except FileNotFound:
3282 # Maybe the target is merged already.
3283 try:
3284 newmd5 = perform_md5(myrealto)
3285 except FileNotFound:
3286 newmd5 = None
3287 mydest = new_protect_filename(mydest, newmd5=newmd5)
3289 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
3290 if (secondhand != None) and (not os.path.exists(myrealto)):
3291 # either the target directory doesn't exist yet or the target file doesn't exist -- or
3292 # the target is a broken symlink. We will add this file to our "second hand" and merge
3293 # it later.
3294 secondhand.append(mysrc[len(srcroot):])
3295 continue
3296 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
3297 mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
3298 if mymtime != None:
3299 showMessage(">>> %s -> %s\n" % (mydest, myto))
3300 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
3301 else:
3302 print "!!! Failed to move file."
3303 print "!!!", mydest, "->", myto
3304 sys.exit(1)
3305 elif stat.S_ISDIR(mymode):
3306 # we are merging a directory
3307 if mydmode != None:
3308 # destination exists
3310 if bsd_chflags:
3311 # Save then clear flags on dest.
3312 dflags = mydstat.st_flags
3313 if dflags != 0:
3314 bsd_chflags.lchflags(mydest, 0)
3316 if not os.access(mydest, os.W_OK):
3317 pkgstuff = pkgsplit(self.pkg)
3318 writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
3319 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
3320 writemsg("!!! You may start the merge process again by using ebuild:\n")
3321 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
3322 writemsg("!!! And finish by running this: env-update\n\n")
3323 return 1
3325 if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
3326 # a symlink to an existing directory will work for us; keep it:
3327 showMessage("--- %s/\n" % mydest)
3328 if bsd_chflags:
3329 bsd_chflags.lchflags(mydest, dflags)
3330 else:
3331 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
3332 if movefile(mydest, mydest+".backup", mysettings=self.settings) is None:
3333 sys.exit(1)
3334 print "bak", mydest, mydest+".backup"
3335 #now create our directory
3336 if self.settings.selinux_enabled():
3337 import selinux
3338 sid = selinux.get_sid(mysrc)
3339 selinux.secure_mkdir(mydest,sid)
3340 else:
3341 os.mkdir(mydest)
3342 if bsd_chflags:
3343 bsd_chflags.lchflags(mydest, dflags)
3344 os.chmod(mydest, mystat[0])
3345 os.chown(mydest, mystat[4], mystat[5])
3346 showMessage(">>> %s/\n" % mydest)
3347 else:
3348 #destination doesn't exist
3349 if self.settings.selinux_enabled():
3350 import selinux
3351 sid = selinux.get_sid(mysrc)
3352 selinux.secure_mkdir(mydest, sid)
3353 else:
3354 os.mkdir(mydest)
3355 os.chmod(mydest, mystat[0])
3356 os.chown(mydest, mystat[4], mystat[5])
3357 showMessage(">>> %s/\n" % mydest)
3358 outfile.write("dir "+myrealdest+"\n")
3359 # recurse and merge this directory
3360 if self.mergeme(srcroot, destroot, outfile, secondhand,
3361 join(offset, x), cfgfiledict, thismtime):
3362 return 1
3363 elif stat.S_ISREG(mymode):
3364 # we are merging a regular file
3365 mymd5 = perform_md5(mysrc, calc_prelink=1)
3366 # calculate config file protection stuff
3367 mydestdir = os.path.dirname(mydest)
3368 moveme = 1
3369 zing = "!!!"
3370 mymtime = None
3371 if mydmode != None:
3372 # destination file exists
3373 if stat.S_ISDIR(mydmode):
3374 # install of destination is blocked by an existing directory with the same name
3375 moveme = 0
3376 showMessage("!!! %s\n" % mydest,
3377 level=logging.ERROR, noiselevel=-1)
3378 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
3379 cfgprot = 0
3380 # install of destination is blocked by an existing regular file,
3381 # or by a symlink to an existing regular file;
3382 # now, config file management may come into play.
3383 # we only need to tweak mydest if cfg file management is in play.
3384 if self.isprotected(mydest):
3385 # we have a protection path; enable config file management.
3386 destmd5 = perform_md5(mydest, calc_prelink=1)
3387 if mymd5 == destmd5:
3388 #file already in place; simply update mtimes of destination
3389 moveme = 1
3390 else:
3391 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
3392 """ An identical update has previously been
3393 merged. Skip it unless the user has chosen
3394 --noconfmem."""
3395 moveme = cfgfiledict["IGNORE"]
3396 cfgprot = cfgfiledict["IGNORE"]
3397 if not moveme:
3398 zing = "---"
3399 mymtime = long(mystat.st_mtime)
3400 else:
3401 moveme = 1
3402 cfgprot = 1
3403 if moveme:
3404 # Merging a new file, so update confmem.
3405 cfgfiledict[myrealdest] = [mymd5]
3406 elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
3407 """A previously remembered update has been
3408 accepted, so it is removed from confmem."""
3409 del cfgfiledict[myrealdest]
3410 if cfgprot:
3411 mydest = new_protect_filename(mydest, newmd5=mymd5)
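# (Hedged note: new_protect_filename() redirects the merge to a
# ._cfgXXXX_<name> sibling of mydest, so that tools such as
# etc-update or dispatch-conf can review the change later.)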
# With or without config protection, the new file is merged the
# same way, unless moveme=0 (install blocked by a directory).
3415 if moveme:
3416 mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
3417 if mymtime is None:
3418 sys.exit(1)
3419 zing = ">>>"
if mymtime is not None:
3422 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
3423 showMessage("%s %s\n" % (zing,mydest))
3424 else:
3425 # we are merging a fifo or device node
3426 zing = "!!!"
3427 if mydmode is None:
3428 # destination doesn't exist
if movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings) is not None:
3430 zing = ">>>"
3431 else:
3432 sys.exit(1)
3433 if stat.S_ISFIFO(mymode):
3434 outfile.write("fif %s\n" % myrealdest)
3435 else:
3436 outfile.write("dev %s\n" % myrealdest)
3437 showMessage(zing + " " + mydest + "\n")
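	# For reference, a sketch of the CONTENTS entry formats emitted by
	# the outfile.write() calls in this method (field order matters):
	#
	#   dir <path>
	#   obj <path> <md5> <mtime>
	#   sym <path> -> <target> <mtime>
	#   fif <path>
	#   dev <path>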
def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
	mydbapi=None, prev_mtimes=None):
	"""
	If portage is reinstalling itself, create temporary
	copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
	to avoid relying on the new versions, which may be
	incompatible. Register an atexit hook to clean up the
	temporary directories. Pre-load elog modules here since
	we won't be able to do so later if they get unmerged
	(this happens when the namespace changes).
	"""
3450 if self.vartree.dbapi._categories is not None:
3451 self.vartree.dbapi._categories = None
3452 if self.myroot == "/" and \
3453 "sys-apps" == self.cat and \
3454 "portage" == pkgsplit(self.pkg)[0]:
3455 settings = self.settings
3456 base_path_orig = os.path.dirname(settings["PORTAGE_BIN_PATH"])
3457 from tempfile import mkdtemp
3458 import shutil
3459 # Make the temp directory inside PORTAGE_TMPDIR since, unlike
3460 # /tmp, it can't be mounted with the "noexec" option.
3461 base_path_tmp = mkdtemp("", "._portage_reinstall_.",
3462 settings["PORTAGE_TMPDIR"])
3463 from portage.process import atexit_register
3464 atexit_register(shutil.rmtree, base_path_tmp)
3465 dir_perms = 0755
3466 for subdir in "bin", "pym":
3467 var_name = "PORTAGE_%s_PATH" % subdir.upper()
3468 var_orig = settings[var_name]
3469 var_new = os.path.join(base_path_tmp, subdir)
3470 settings[var_name] = var_new
3471 settings.backup_changes(var_name)
3472 shutil.copytree(var_orig, var_new, symlinks=True)
3473 os.chmod(var_new, dir_perms)
3474 os.chmod(base_path_tmp, dir_perms)
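			# Illustration of the assumed layout after this loop: the
			# temporary copies live at
			#   ${PORTAGE_TMPDIR}/._portage_reinstall_.XXXXXX/bin
			#   ${PORTAGE_TMPDIR}/._portage_reinstall_.XXXXXX/pym
			# and PORTAGE_BIN_PATH / PORTAGE_PYM_PATH point at them for
			# the remainder of this merge.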
# This serves to pre-load the elog modules.
3476 elog_process(self.mycpv, self.settings,
3477 phasefilter=filter_mergephases)
3479 return self._merge(mergeroot, inforoot,
3480 myroot, myebuild=myebuild, cleanup=cleanup,
3481 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
3483 def _merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
3484 mydbapi=None, prev_mtimes=None):
3485 retval = -1
3486 self.lockdb()
3487 try:
3488 retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
3489 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
3490 # undo registrations of preserved libraries, bug #210501
3491 if retval != os.EX_OK:
3492 self.vartree.dbapi.plib_registry.unregister(self.mycpv, self.settings["SLOT"], self.settings["COUNTER"])
3493 # Process ebuild logfiles
3494 elog_process(self.mycpv, self.settings, phasefilter=filter_mergephases)
3495 if retval == os.EX_OK and "noclean" not in self.settings.features:
3496 if myebuild is None:
3497 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
3498 doebuild(myebuild, "clean", myroot, self.settings,
3499 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
3500 finally:
3501 self.unlockdb()
3502 return retval
3504 def getstring(self,name):
3505 "returns contents of a file with whitespace converted to spaces"
3506 if not os.path.exists(self.dbdir+"/"+name):
3507 return ""
3508 myfile = open(self.dbdir+"/"+name,"r")
3509 mydata = myfile.read().split()
3510 myfile.close()
3511 return " ".join(mydata)
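	# Hedged usage sketch ("mydblink" is a hypothetical dblink instance):
	#   use_flags = mydblink.getstring("USE").split()
	# returns the tokens of this package's vdb USE file regardless of how
	# they were wrapped across lines.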
3513 def copyfile(self,fname):
3514 import shutil
3515 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
3517 def getfile(self,fname):
3518 if not os.path.exists(self.dbdir+"/"+fname):
3519 return ""
3520 myfile = open(self.dbdir+"/"+fname,"r")
3521 mydata = myfile.read()
3522 myfile.close()
3523 return mydata
3525 def setfile(self,fname,data):
3526 write_atomic(os.path.join(self.dbdir, fname), data)
3528 def getelements(self,ename):
3529 if not os.path.exists(self.dbdir+"/"+ename):
3530 return []
3531 myelement = open(self.dbdir+"/"+ename,"r")
3532 mylines = myelement.readlines()
3533 myreturn = []
3534 for x in mylines:
for y in x.split():  # split() also discards the trailing newline
3536 myreturn.append(y)
3537 myelement.close()
3538 return myreturn
3540 def setelements(self,mylist,ename):
3541 myelement = open(self.dbdir+"/"+ename,"w")
3542 for x in mylist:
3543 myelement.write(x+"\n")
3544 myelement.close()
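	# Hedged usage sketch: these two helpers round-trip a token-per-line
	# vdb file ("mydblink" and the file name are illustrative):
	#   features = mydblink.getelements("FEATURES")
	#   mydblink.setelements(features, "FEATURES")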
3546 def isregular(self):
3547 "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
3548 return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
def write_contents(contents, root, f):
	"""
	Write contents to any file-like object. The file will be left open.
	"""
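	# root is expected to end with a slash; len(root) - 1 keeps one
	# character of it so each recorded path retains its leading "/"
	# (e.g. root "/chroot/" turns "/chroot/usr/bin/foo" into "/usr/bin/foo").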
3554 root_len = len(root) - 1
3555 for filename in sorted(contents):
3556 entry_data = contents[filename]
3557 entry_type = entry_data[0]
3558 relative_filename = filename[root_len:]
3559 if entry_type == "obj":
3560 entry_type, mtime, md5sum = entry_data
3561 line = "%s %s %s %s\n" % \
3562 (entry_type, relative_filename, md5sum, mtime)
3563 elif entry_type == "sym":
3564 entry_type, mtime, link = entry_data
3565 line = "%s %s -> %s %s\n" % \
3566 (entry_type, relative_filename, link, mtime)
3567 else: # dir, dev, fif
3568 line = "%s %s\n" % (entry_type, relative_filename)
3569 f.write(line)
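# Hedged usage sketch for write_contents(), assuming "mydblink" is a
# dblink instance for a package installed under ROOT="/":
#
#	import sys
#	contents = mydblink.getcontents()
#	if contents:
#		write_contents(contents, "/", sys.stdout)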
3571 def tar_contents(contents, root, tar, protect=None, onProgress=None):
3572 from portage.util import normalize_path
3573 import tarfile
3574 root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
3575 id_strings = {}
3576 maxval = len(contents)
3577 curval = 0
3578 if onProgress:
3579 onProgress(maxval, 0)
paths = sorted(contents)
3582 for path in paths:
3583 curval += 1
3584 try:
3585 lst = os.lstat(path)
3586 except OSError, e:
3587 if e.errno != errno.ENOENT:
3588 raise
3589 del e
3590 if onProgress:
3591 onProgress(maxval, curval)
3592 continue
3593 contents_type = contents[path][0]
3594 if path.startswith(root):
3595 arcname = path[len(root):]
3596 else:
3597 raise ValueError("invalid root argument: '%s'" % root)
3598 live_path = path
3599 if 'dir' == contents_type and \
3600 not stat.S_ISDIR(lst.st_mode) and \
3601 os.path.isdir(live_path):
3602 # Even though this was a directory in the original ${D}, it exists
3603 # as a symlink to a directory in the live filesystem. It must be
3604 # recorded as a real directory in the tar file to ensure that tar
# can properly extract its children.
3606 live_path = os.path.realpath(live_path)
3607 tarinfo = tar.gettarinfo(live_path, arcname)
3608 # store numbers instead of real names like tar's --numeric-owner
3609 tarinfo.uname = id_strings.setdefault(tarinfo.uid, str(tarinfo.uid))
3610 tarinfo.gname = id_strings.setdefault(tarinfo.gid, str(tarinfo.gid))
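		# Net effect (illustrative): a file owned by uid/gid 1000 is
		# stored with uname "1000" and gname "1000", so extraction does
		# not depend on the build host's passwd/group databases.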
3612 if stat.S_ISREG(lst.st_mode):
3613 # break hardlinks due to bug #185305
3614 tarinfo.type = tarfile.REGTYPE
3615 if protect and protect(path):
3616 # Create an empty file as a place holder in order to avoid
3617 # potential collision-protect issues.
3618 tarinfo.size = 0
3619 tar.addfile(tarinfo)
3620 else:
f = open(path, 'rb')  # binary mode, so the archived data is byte-exact
3622 try:
3623 tar.addfile(tarinfo, f)
3624 finally:
3625 f.close()
3626 else:
3627 tar.addfile(tarinfo)
3628 if onProgress:
3629 onProgress(maxval, curval)
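# Hedged usage sketch for tar_contents(), in the spirit of quickpkg
# ("mydblink" and the archive path are illustrative):
#
#	import tarfile
#	tar = tarfile.open("/tmp/mypkg.tar.bz2", "w:bz2")
#	try:
#		contents = mydblink.getcontents()
#		if contents:
#			tar_contents(contents, "/", tar)
#	finally:
#		tar.close()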