Remove M2Crypto and replace with cryptography module
[unleashed-pkg5.git] / src / modules / client / image.py
blob27d573410c7347074b820e37f9e502510d20b46d
1 #!/usr/bin/python2.7
3 # CDDL HEADER START
5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
20 # CDDL HEADER END
24 # Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
27 import atexit
28 import calendar
29 import collections
30 import copy
31 import datetime
32 import errno
33 import hashlib
34 import os
35 import platform
36 import shutil
37 import simplejson as json
38 import stat
39 import sys
40 import tempfile
41 import time
42 import urllib
44 from contextlib import contextmanager
45 from pkg.client import global_settings
46 logger = global_settings.logger
47 from cryptography import x509
48 from cryptography.hazmat.backends import default_backend
50 import pkg.actions
51 import pkg.catalog
52 import pkg.client.api_errors as apx
53 import pkg.client.bootenv as bootenv
54 import pkg.client.history as history
55 import pkg.client.imageconfig as imageconfig
56 import pkg.client.imageplan as imageplan
57 import pkg.client.linkedimage as li
58 import pkg.client.pkgdefs as pkgdefs
59 import pkg.client.pkgplan as pkgplan
60 import pkg.client.plandesc as plandesc
61 import pkg.client.progress as progress
62 import pkg.client.publisher as publisher
63 import pkg.client.sigpolicy as sigpolicy
64 import pkg.client.transport.transport as transport
65 import pkg.config as cfg
66 import pkg.file_layout.layout as fl
67 import pkg.fmri
68 import pkg.lockfile as lockfile
69 import pkg.manifest as manifest
70 import pkg.mediator as med
71 import pkg.misc as misc
72 import pkg.nrlock
73 import pkg.pkgsubprocess as subprocess
74 import pkg.portable as portable
75 import pkg.server.catalog
76 import pkg.smf as smf
77 import pkg.version
79 from pkg.client.debugvalues import DebugValues
80 from pkg.client.imagetypes import IMG_USER, IMG_ENTIRE
81 from pkg.client.transport.exception import InvalidContentException
82 from pkg.misc import EmptyI, EmptyDict
# Location of image metadata relative to the image root.  IMG_USER images
# keep state in a hidden directory under the root; IMG_ENTIRE/IMG_PARTIAL
# images use var/pkg (see the Image class docstring).
img_user_prefix = ".org.opensolaris,pkg"
img_root_prefix = "var/pkg"

# Name of the per-publisher metadata/cache directory within the image.
IMG_PUB_DIR = "publisher"
class Image(object):
    """An Image object is a directory tree containing the laid-down contents
    of a self-consistent graph of Packages.

    An Image has a root path.

    An Image of type IMG_ENTIRE does not have a parent Image.  Other Image
    types must have a parent Image.  The external state of the parent Image
    must be accessible from the Image's context, or duplicated within the
    Image (IMG_PARTIAL for zones, for instance).

    The parent of a user Image can be a partial Image.  The parent of a
    partial Image must be an entire Image.

    An Image of type IMG_USER stores its external state at self.root +
    ".org.opensolaris,pkg".

    An Image of type IMG_ENTIRE or IMG_PARTIAL stores its external state at
    self.root + "/var/pkg".

    An Image needs to be able to have a different repository set than the
    system's root Image.

    For image format details, see section 5.3 of doc/on-disk-format.txt
    in the pkg(5) gate.
    """

    # Class constants
    # Current on-disk image format version; see __set_dirs/update_format.
    CURRENT_VERSION = 4
    # Names of the two image catalogs (all known pkgs vs. installed pkgs).
    IMG_CATALOG_KNOWN = "known"
    IMG_CATALOG_INSTALLED = "installed"

    # Marker file indicating on-disk state is mid-update.
    __STATE_UPDATING_FILE = "state_updating"
    def __init__(self, root, user_provided_dir=False, progtrack=None,
        should_exist=True, imgtype=None, force=False,
        augment_ta_from_parent_image=True, allow_ondisk_upgrade=None,
        props=misc.EmptyDict, cmdpath=None):
        """Construct an Image rooted at 'root'.

        When 'should_exist' is true an existing image is located via
        find_root(); otherwise a new image of 'imgtype' is created,
        with 'force' permitting creation over a non-empty directory
        or an existing image.  'allow_ondisk_upgrade' controls whether
        automatic on-disk format upgrades may occur (None means
        decide automatically; see update_format)."""

        if should_exist:
            # imgtype/force only apply when creating a new image.
            assert(imgtype is None)
            assert(not force)
        else:
            assert(imgtype is not None)

        # Alternate package sources.
        self.__alt_pkg_pub_map = None
        self.__alt_pubs = None
        self.__alt_known_cat = None
        self.__alt_pkg_sources_loaded = False

        # Determine identity of client executable if appropriate.
        if cmdpath == None:
            cmdpath = misc.api_cmdpath()
        self.cmdpath = cmdpath

        if self.cmdpath != None:
            self.__cmddir = os.path.dirname(cmdpath)

        # prevent brokenness in the test suite
        if self.cmdpath and \
            "PKG_NO_RUNPY_CMDPATH" in os.environ and \
            self.cmdpath.endswith(os.sep + "run.py"):
            raise RuntimeError("""
An Image object was allocated from within ipkg test suite and
cmdpath was not explicitly overridden. Please make sure to
explicitly set cmdpath when allocating an Image object, or
override cmdpath when allocating an Image object by setting PKG_CMDPATH
in the environment or by setting simulate_cmdpath in DebugValues.""")

        self.linked = None

        # Indicates whether automatic image format upgrades of the
        # on-disk format are allowed.
        self.allow_ondisk_upgrade = allow_ondisk_upgrade
        self.__upgraded = False

        # Must happen after upgraded assignment; __init_catalogs
        # consults self.__upgraded.
        self.__init_catalogs()

        self.__imgdir = None
        self.__root = root

        self.blocking_locks = False
        self.cfg = None
        self.history = history.History()
        self.imageplan = None
        self.img_prefix = None
        self.index_dir = None
        self.plandir = None
        self.version = -1

        # Can have multiple read cache dirs...
        self.__read_cache_dirs = []

        # ...but only one global write cache dir and incoming write dir.
        self.__write_cache_dir = None
        self.__user_cache_dir = None
        self._incoming_cache_dir = None

        # Set if write_cache is actually a tree like /var/pkg/publisher
        # instead of a flat cache.
        self.__write_cache_root = None

        self.__lock = pkg.nrlock.NRLock()
        self.__lockfile = None
        self.__sig_policy = None
        self.__trust_anchors = None
        self.__bad_trust_anchors = []

        # cache for presence of boot-archive
        self.__boot_archive = None

        # When users and groups are added before their database files
        # have been installed, the actions store them temporarily in the
        # image, in these members.
        self._users = set()
        self._groups = set()
        self._usersbyname = {}
        self._groupsbyname = {}

        # Set of pkg stems being avoided
        self.__avoid_set = None
        self.__avoid_set_altered = False

        # set of pkg stems subject to group
        # dependency but removed because obsolete
        self.__group_obsolete = None

        # The action dictionary that's returned by __load_actdict.
        self.__actdict = None
        self.__actdict_timestamp = None

        # Property values supplied by the caller override configuration.
        self.__property_overrides = { "property": props }

        # Transport operations for this image
        self.transport = transport.Transport(
            transport.ImageTransportCfg(self))

        self.linked = li.LinkedImage(self)

        if should_exist:
            self.find_root(self.root, user_provided_dir,
                progtrack)
        else:
            if not force and self.image_type(self.root) != None:
                raise apx.ImageAlreadyExists(self.root)
            if not force and os.path.exists(self.root):
                # ignore .zfs snapdir if it's present
                snapdir = os.path.join(self.root, ".zfs")
                listdir = set(os.listdir(self.root))
                if os.path.isdir(snapdir):
                    listdir -= set([".zfs"])
                if len(listdir) > 0:
                    raise apx.CreatingImageInNonEmptyDir(
                        self.root)
            self.__set_dirs(root=self.root, imgtype=imgtype,
                progtrack=progtrack, purge=True)

        # right now we don't explicitly set dir/file modes everywhere;
        # set umask to proper value to prevent problems w/ overly
        # locked down umask.
        os.umask(0o022)

        self.augment_ta_from_parent_image = augment_ta_from_parent_image
255 def __catalog_loaded(self, name):
256 """Returns a boolean value indicating whether the named catalog
257 has already been loaded. This is intended to be used as an
258 optimization function to determine which catalog to request."""
260 return name in self.__catalogs
262 def __init_catalogs(self):
263 """Initializes default catalog state. Actual data is provided
264 on demand via get_catalog()"""
266 if self.__upgraded and self.version < 3:
267 # Ignore request; transformed catalog data only exists
268 # in memory and can't be reloaded from disk.
269 return
271 # This is used to cache image catalogs.
272 self.__catalogs = {}
273 self.__alt_pkg_sources_loaded = False
275 @staticmethod
276 def alloc(*args, **kwargs):
277 return Image(*args, **kwargs)
    @property
    def imgdir(self):
        """The absolute path of the image's metadata directory
        (self.root joined with the image-type prefix); None until
        __set_dirs() has run."""
        return self.__imgdir
284 @property
285 def locked(self):
286 """A boolean value indicating whether the image is currently
287 locked."""
289 return self.__lock and self.__lock.locked
    @property
    def root(self):
        """The absolute path of the image's location (read-only;
        set internally by __set_dirs())."""
        return self.__root
296 @property
297 def signature_policy(self):
298 """The current signature policy for this image."""
300 if self.__sig_policy is not None:
301 return self.__sig_policy
302 txt = self.cfg.get_policy_str(imageconfig.SIGNATURE_POLICY)
303 names = self.cfg.get_property("property",
304 "signature-required-names")
305 self.__sig_policy = sigpolicy.Policy.policy_factory(txt, names)
306 return self.__sig_policy
    @property
    def trust_anchors(self):
        """A dictionary mapping subject hashes for certificates this
        image trusts to those certs.  The image trusts the trust anchors
        in its trust_anchor_dir and those in the image from which the
        client was run."""

        if self.__trust_anchors is not None:
            # Already loaded; return the cached mapping.
            return self.__trust_anchors

        user_set_ta_loc = True
        rel_dir = self.get_property("trust-anchor-directory")
        if rel_dir[0] == "/":
            # Strip leading slash so the path joins below the
            # image root instead of replacing it.
            rel_dir = rel_dir[1:]
        trust_anchor_loc = os.path.join(self.root, rel_dir)
        loc_is_dir = os.path.isdir(trust_anchor_loc)
        pkg_trust_anchors = {}
        if self.__cmddir and self.augment_ta_from_parent_image:
            # Also gather the trust anchors of the image the client
            # executable was run from (non-recursive: that Image is
            # created with augment_ta_from_parent_image=False).
            pkg_trust_anchors = Image(self.__cmddir,
                augment_ta_from_parent_image=False,
                cmdpath=self.cmdpath).trust_anchors
        if not loc_is_dir and os.path.exists(trust_anchor_loc):
            raise apx.InvalidPropertyValue(_("The trust "
                "anchors for the image were expected to be found "
                "in {0}, but that is not a directory. Please set "
                "the image property 'trust-anchor-directory' to "
                "the correct path.").format(trust_anchor_loc))
        self.__trust_anchors = {}
        if loc_is_dir:
            for fn in os.listdir(trust_anchor_loc):
                pth = os.path.join(trust_anchor_loc, fn)
                if os.path.islink(pth):
                    # Symlinked entries are ignored.
                    continue
                try:
                    with open(pth, "rb") as f:
                        raw = f.read()
                    trusted_ca = \
                        x509.load_pem_x509_certificate(
                        raw, default_backend())
                except (ValueError, IOError) as e:
                    # Unparsable files are recorded and
                    # reported via bad_trust_anchors.
                    self.__bad_trust_anchors.append(
                        (pth, str(e)))
                else:
                    # We store certificates internally by
                    # the SHA-1 hash of its subject.
                    s = hashlib.sha1(misc.force_bytes(
                        trusted_ca.subject)).hexdigest()
                    self.__trust_anchors.setdefault(s, [])
                    self.__trust_anchors[s].append(
                        trusted_ca)
        for s in pkg_trust_anchors:
            # Anchors from the parent image only fill gaps; this
            # image's own anchors win on subject-hash collision.
            if s not in self.__trust_anchors:
                self.__trust_anchors[s] = pkg_trust_anchors[s]
        return self.__trust_anchors
363 @property
364 def bad_trust_anchors(self):
365 """A list of strings decribing errors encountered while parsing
366 trust anchors."""
368 return [_("{path} is expected to be a certificate but could "
369 "not be parsed. The error encountered "
370 "was:\n\t{err}").format(path=p, err=e)
371 for p, e in self.__bad_trust_anchors
374 @property
375 def write_cache_path(self):
376 """The path to the filesystem that holds the write cache--used
377 to compute whether sufficent space is available for
378 downloads."""
380 return self.__user_cache_dir or \
381 os.path.join(self.imgdir, IMG_PUB_DIR)
    @contextmanager
    def locked_op(self, op, allow_unprivileged=False, new_history_op=True):
        """Helper method for executing an image-modifying operation
        that needs locking.  It automatically handles calling
        log_operation_start and log_operation_end by default.  Locking
        behaviour is controlled by the blocking_locks image property.

        'allow_unprivileged' is an optional boolean value indicating
        that permissions-related exceptions should be ignored when
        attempting to obtain the lock as the related operation will
        still work correctly even though the image cannot (presumably)
        be modified.

        'new_history_op' indicates whether we should handle history
        operations.
        """

        error = None
        self.lock(allow_unprivileged=allow_unprivileged)
        try:
            be_name, be_uuid = \
                bootenv.BootEnv.get_be_name(self.root)
            if new_history_op:
                self.history.log_operation_start(op,
                    be_name=be_name, be_uuid=be_uuid)
            yield
        except apx.ImageLockedError as e:
            # Don't unlock the image if the call failed to
            # get the lock.
            error = e
            raise
        except Exception as e:
            # Any other failure: record it and release the lock
            # before re-raising.
            error = e
            self.unlock()
            raise
        else:
            self.unlock()
        finally:
            # History end is logged in every path, with the error
            # (if any) captured above.
            if new_history_op:
                self.history.log_operation_end(error=error)
    def lock(self, allow_unprivileged=False):
        """Locks the image in preparation for an image-modifying
        operation.  Raises an ImageLockedError exception on failure.
        Locking behaviour is controlled by the blocking_locks image
        property.

        'allow_unprivileged' is an optional boolean value indicating
        that permissions-related exceptions should be ignored when
        attempting to obtain the lock as the related operation will
        still work correctly even though the image cannot (presumably)
        be modified.
        """

        blocking = self.blocking_locks

        # First, attempt to obtain a thread lock.
        if not self.__lock.acquire(blocking=blocking):
            raise apx.ImageLockedError()

        try:
            # Attempt to obtain a file lock.
            self.__lockfile.lock(blocking=blocking)
        except EnvironmentError as e:
            exc = None
            if e.errno == errno.ENOENT:
                # No lock file exists; continue with only the
                # thread lock held.
                return
            if e.errno == errno.EACCES:
                exc = apx.UnprivilegedUserError(e.filename)
            elif e.errno == errno.EROFS:
                exc = apx.ReadOnlyFileSystemException(
                    e.filename)
            else:
                # Unexpected errno: release thread lock and
                # propagate the original error.
                self.__lock.release()
                raise

            # Permission-style failures are only fatal when the
            # caller didn't opt into unprivileged operation.
            if exc and not allow_unprivileged:
                self.__lock.release()
                raise exc
        except:
            # If process lock fails, ensure thread lock is released.
            self.__lock.release()
            raise
467 def unlock(self):
468 """Unlocks the image."""
470 try:
471 if self.__lockfile:
472 self.__lockfile.unlock()
473 finally:
474 self.__lock.release()
476 def image_type(self, d):
477 """Returns the type of image at directory: d; or None"""
478 rv = None
480 def is_image(sub_d, prefix):
481 # First check for new image configuration file.
482 if os.path.isfile(os.path.join(sub_d, prefix,
483 "pkg5.image")):
484 # Regardless of directory structure, assume
485 # this is an image for now.
486 return True
488 if not os.path.isfile(os.path.join(sub_d, prefix,
489 "cfg_cache")):
490 # For older formats, if configuration is
491 # missing, this can't be an image.
492 return False
494 # Configuration exists, but for older formats,
495 # all of these directories have to exist.
496 for n in ("state", "pkg"):
497 if not os.path.isdir(os.path.join(sub_d, prefix,
498 n)):
499 return False
501 return True
503 if os.path.isdir(os.path.join(d, img_user_prefix)) and \
504 is_image(d, img_user_prefix):
505 rv = IMG_USER
506 elif os.path.isdir(os.path.join(d, img_root_prefix)) and \
507 is_image(d, img_root_prefix):
508 rv = IMG_ENTIRE
509 return rv
    def find_root(self, d, exact_match=False, progtrack=None):
        """Ascend from the given directory 'd' to find the first
        encountered image and bind this object to it via __set_dirs().
        If 'exact_match' is true and the image found doesn't match the
        starting directory, raise an ImageNotFoundException."""

        startd = d
        # eliminate problem if relative path such as "." is passed in
        d = os.path.realpath(d)

        while True:
            imgtype = self.image_type(d)
            if imgtype in (IMG_USER, IMG_ENTIRE):
                if exact_match and \
                    os.path.realpath(startd) != \
                    os.path.realpath(d):
                    raise apx.ImageNotFoundException(
                        exact_match, startd, d)
                self.__set_dirs(imgtype=imgtype, root=d,
                    startd=startd, progtrack=progtrack)
                return

            # XXX follow symlinks or not?
            oldpath = d
            d = os.path.normpath(os.path.join(d, os.path.pardir))

            # Make sure we are making progress and aren't in an
            # infinite loop.
            #
            # (XXX - Need to deal with symlinks here too)
            if d == oldpath:
                # Reached the filesystem root without finding an
                # image.
                raise apx.ImageNotFoundException(
                    exact_match, startd, d)
    def __load_config(self):
        """Load this image's cached configuration from the default
        location.  This function should not be called anywhere other
        than __set_dirs()."""

        # XXX Incomplete with respect to doc/image.txt description of
        # configuration.

        if self.root == None:
            raise RuntimeError("self.root must be set")

        version = None
        if self.version > -1:
            if self.version >= 3:
                # Configuration version is currently 3
                # for all v3 images and newer.
                version = 3
            else:
                version = self.version

        self.cfg = imageconfig.ImageConfig(self.__cfgpathname,
            self.root, version=version,
            overrides=self.__property_overrides)

        if self.__upgraded:
            # Once upgraded, blend in system repository (syspub)
            # publisher configuration.
            self.cfg = imageconfig.BlendedConfig(self.cfg,
                self.get_catalog(self.IMG_CATALOG_INSTALLED).\
                get_package_counts_by_pub(),
                self.imgdir, self.transport,
                self.cfg.get_policy("use-system-repo"))

        self.__load_publisher_ssl()
    def __store_publisher_ssl(self):
        """Normalizes publisher SSL configuration data, storing any
        certificate files as needed in the image's SSL directory.  This
        logic is performed here in the image instead of ImageConfig as
        it relies on special knowledge of the image structure."""

        ssl_dir = os.path.join(self.imgdir, "ssl")

        def store_ssl_file(src):
            # Copy 'src' into ssl_dir under a digest-based name and
            # return the destination path; returns None when 'src'
            # is unset or missing.
            try:
                if not src or not os.path.exists(src):
                    # If SSL file doesn't exist (for
                    # whatever reason), then don't update
                    # configuration.  (Let the failure
                    # happen later during an operation
                    # that requires the file.)
                    return
            except EnvironmentError as e:
                raise apx._convert_error(e)

            # Ensure ssl_dir exists; makedirs handles any errors.
            misc.makedirs(ssl_dir)

            try:
                # Destination name is based on digest of file.
                # In order for this image to interoperate with
                # older and newer clients, we must use sha-1
                # here.
                dest = os.path.join(ssl_dir,
                    misc.get_data_digest(src,
                    hash_func=hashlib.sha1)[0])
                if src != dest:
                    portable.copyfile(src, dest)

                # Ensure file can be read by unprivileged users.
                os.chmod(dest, misc.PKG_FILE_MODE)
            except EnvironmentError as e:
                raise apx._convert_error(e)
            return dest

        for pub in self.cfg.publishers.values():
            # self.cfg.publishers is used because gen_publishers
            # includes temporary publishers and this is only for
            # configured ones.
            repo = pub.repository
            if not repo:
                continue

            # Store and normalize ssl_cert and ssl_key.
            for u in repo.origins + repo.mirrors:
                for prop in ("ssl_cert", "ssl_key"):
                    pval = getattr(u, prop)
                    if pval:
                        pval = store_ssl_file(pval)
                    if not pval:
                        continue
                    # Store path as absolute to image root,
                    # it will be corrected on load to match
                    # actual image location if needed.
                    setattr(u, prop,
                        os.path.splitdrive(self.root)[0] +
                        os.path.sep +
                        misc.relpath(pval, start=self.root))
    def __load_publisher_ssl(self):
        """Should be called every time image configuration is loaded;
        ensure ssl_cert and ssl_key properties of publisher repository
        URI objects match current image location."""

        ssl_dir = os.path.join(self.imgdir, "ssl")

        for pub in self.cfg.publishers.values():
            # self.cfg.publishers is used because gen_publishers
            # includes temporary publishers and this is only for
            # configured ones.
            repo = pub.repository
            if not repo:
                continue

            for u in repo.origins + repo.mirrors:
                for prop in ("ssl_cert", "ssl_key"):
                    pval = getattr(u, prop)
                    if not pval:
                        continue
                    if not os.path.join(self.img_prefix,
                        "ssl") in os.path.dirname(pval):
                        # Path is outside the image's ssl
                        # area; leave it untouched.
                        continue
                    # If special image directory is part
                    # of path, then assume path should be
                    # rewritten to match current image
                    # location.
                    setattr(u, prop, os.path.join(ssl_dir,
                        os.path.basename(pval)))
    def save_config(self):
        """Write the image configuration to disk, normalizing any
        publisher SSL files first, and notify the system repository
        service (if running) of the change."""

        # First, create the image directories if they haven't been, so
        # the configuration file can be written.
        self.mkdirs()

        self.__store_publisher_ssl()
        self.cfg.write()
        self.__load_publisher_ssl()

        # Remove the old pkg.sysrepo(1M) cache, if present.
        cache_path = os.path.join(self.root,
            global_settings.sysrepo_pub_cache_path)
        try:
            portable.remove(cache_path)
        except EnvironmentError as e:
            # A missing cache file is fine; anything else is an
            # API error.
            if e.errno != errno.ENOENT:
                raise apx._convert_error(e)

        if self.is_liveroot() and \
            smf.get_state(
            "svc:/application/pkg/system-repository:default") in \
            (smf.SMF_SVC_TMP_ENABLED, smf.SMF_SVC_ENABLED):
            smf.refresh([
                "svc:/application/pkg/system-repository:default"])

        # This ensures all old transport configuration is thrown away.
        self.transport = transport.Transport(
            transport.ImageTransportCfg(self))
701 def mkdirs(self, root=None, version=None):
702 """Create any missing parts of the image's directory structure.
704 'root' is an optional path to a directory to create the new
705 image structure in. If not provided, the current image
706 directory is the default.
708 'version' is an optional integer value indicating the version
709 of the structure to create. If not provided, the current image
710 version is the default.
713 if not root:
714 root = self.imgdir
715 if not version:
716 version = self.version
718 if version == self.CURRENT_VERSION:
719 img_dirs = ["cache/index", "cache/publisher",
720 "cache/tmp", "gui_cache", "history", "license",
721 "lost+found", "publisher", "ssl", "state/installed",
722 "state/known"]
723 else:
724 img_dirs = ["download", "file", "gui_cache", "history",
725 "index", "lost+found", "pkg", "publisher",
726 "state/installed", "state/known", "tmp"]
728 for sd in img_dirs:
729 try:
730 misc.makedirs(os.path.join(root, sd))
731 except EnvironmentError as e:
732 raise apx._convert_error(e)
    def __set_dirs(self, imgtype, root, startd=None, progtrack=None,
        purge=False):
        """Bind this Image to the image of 'imgtype' rooted at 'root':
        determine the on-disk format version, load configuration, set
        up cache/state directories, and upgrade the image format if
        needed.  When 'purge' is true (initial image creation), any
        existing image metadata except the ssl directory is removed
        first."""

        # Ensure upgraded status is reset.
        self.__upgraded = False

        if not self.__allow_liveroot() and root == misc.liveroot():
            if startd == None:
                startd = root
            raise RuntimeError(
                "Live root image access is disabled but was \
attempted.\nliveroot: {0}\nimage path: {1}".format(
                misc.liveroot(), startd))

        self.__root = root
        self.type = imgtype
        if self.type == IMG_USER:
            self.img_prefix = img_user_prefix
        else:
            self.img_prefix = img_root_prefix

        # Use a new Transport object every time location is changed.
        self.transport = transport.Transport(
            transport.ImageTransportCfg(self))

        # cleanup specified path
        if os.path.isdir(root):
            try:
                cwd = os.getcwd()
            except Exception as e:
                # If current directory can't be obtained for any
                # reason, ignore the error.
                cwd = None

            try:
                # chdir into the root and back to canonicalize
                # the path exactly as the OS sees it.
                os.chdir(root)
                self.__root = os.getcwd()
            except EnvironmentError as e:
                raise apx._convert_error(e)
            finally:
                if cwd:
                    os.chdir(cwd)

        # If current image is locked, then it should be unlocked
        # and then relocked after the imgdir is changed.  This
        # ensures that alternate BE scenarios work.
        relock = self.imgdir and self.locked
        if relock:
            self.unlock()

        # Must set imgdir first.
        self.__imgdir = os.path.join(self.root, self.img_prefix)

        # Force a reset of version.
        self.version = -1

        # Assume version 4+ configuration location.
        self.__cfgpathname = os.path.join(self.imgdir, "pkg5.image")

        # In the case of initial image creation, purge is specified
        # to ensure that when an image is created over an existing
        # one, any old data is removed first.
        if purge and os.path.exists(self.imgdir):
            for entry in os.listdir(self.imgdir):
                if entry == "ssl":
                    # Preserve certs and keys directory
                    # as a special exception.
                    continue
                epath = os.path.join(self.imgdir, entry)
                try:
                    if os.path.isdir(epath):
                        shutil.rmtree(epath)
                    else:
                        portable.remove(epath)
                except EnvironmentError as e:
                    raise apx._convert_error(e)
        elif not purge:
            # Determine if the version 4 configuration file exists.
            if not os.path.exists(self.__cfgpathname):
                # Fall back to the older configuration name.
                self.__cfgpathname = os.path.join(self.imgdir,
                    "cfg_cache")

        # Load the image configuration.
        self.__load_config()

        if not purge:
            try:
                self.version = int(self.cfg.get_property("image",
                    "version"))
            except (cfg.PropertyConfigError, ValueError):
                # If version couldn't be read from
                # configuration, then allow fallback
                # path below to set things right.
                self.version = -1

        if self.version <= 0:
            # If version doesn't exist, attempt to determine version
            # based on structure.
            pub_root = os.path.join(self.imgdir, IMG_PUB_DIR)
            if purge:
                # This is a new image.
                self.version = self.CURRENT_VERSION
            elif os.path.exists(pub_root):
                cache_root = os.path.join(self.imgdir, "cache")
                if os.path.exists(cache_root):
                    # The image must be corrupted, as the
                    # version should have been loaded from
                    # configuration.  For now, raise an
                    # exception.  In the future, this
                    # behaviour should probably be optional
                    # so that pkg fix or pkg verify can
                    # still use the image.
                    raise apx.UnsupportedImageError(
                        self.root)
                else:
                    # Assume version 3 image.
                    self.version = 3

                # Reload image configuration again now that
                # version has been determined so that property
                # definitions match.
                self.__load_config()
            elif os.path.exists(os.path.join(self.imgdir,
                "catalog")):
                self.version = 2

                # Reload image configuration again now that
                # version has been determined so that property
                # definitions match.
                self.__load_config()
            else:
                # Format is too old or invalid.
                raise apx.UnsupportedImageError(self.root)

        if self.version > self.CURRENT_VERSION or self.version < 2:
            # Image is too new or too old.
            raise apx.UnsupportedImageError(self.root)

        # Ensure image version matches determined one; this must
        # be set *after* the version checks above.
        self.cfg.set_property("image", "version", self.version)

        # Remaining dirs may now be set.
        if self.version == self.CURRENT_VERSION:
            self.__tmpdir = os.path.join(self.imgdir, "cache",
                "tmp")
        else:
            self.__tmpdir = os.path.join(self.imgdir, "tmp")
        self._statedir = os.path.join(self.imgdir, "state")
        self.plandir = os.path.join(self.__tmpdir, "plan")
        self.update_index_dir()

        self.history.root_dir = self.imgdir
        self.__lockfile = lockfile.LockFile(os.path.join(self.imgdir,
            "lock"), set_lockstr=lockfile.client_lock_set_str,
            get_lockstr=lockfile.client_lock_get_str,
            failure_exc=apx.ImageLockedError,
            provide_mutex=False)

        if relock:
            self.lock()

        # Setup cache directories.
        self.__read_cache_dirs = []
        self._incoming_cache_dir = None
        self.__user_cache_dir = None
        self.__write_cache_dir = None
        self.__write_cache_root = None
        # The user specified cache is used as an additional place to
        # read cache data from, but as the only place to store new
        # cache data.
        if "PKG_CACHEROOT" in os.environ:
            # If set, cache is structured like /var/pkg/publisher.
            # get_cachedirs() will build paths for each publisher's
            # cache using this directory.
            self.__user_cache_dir = os.path.normpath(
                os.environ["PKG_CACHEROOT"])
            self.__write_cache_root = self.__user_cache_dir
        elif "PKG_CACHEDIR" in os.environ:
            # If set, cache is a flat structure that is used for
            # all publishers.
            self.__user_cache_dir = os.path.normpath(
                os.environ["PKG_CACHEDIR"])
            self.__write_cache_dir = self.__user_cache_dir
            # Since the cache structure is flat, add it to the
            # list of global read caches.
            self.__read_cache_dirs.append(self.__user_cache_dir)
        if self.__user_cache_dir:
            self._incoming_cache_dir = os.path.join(
                self.__user_cache_dir,
                "incoming-{0:d}".format(os.getpid()))

        if self.version < 4:
            self.__action_cache_dir = self.temporary_dir()
        else:
            self.__action_cache_dir = os.path.join(self.imgdir,
                "cache")

        if self.version < 4:
            if not self.__user_cache_dir:
                self.__write_cache_dir = os.path.join(
                    self.imgdir, "download")
                self._incoming_cache_dir = os.path.join(
                    self.__write_cache_dir,
                    "incoming-{0:d}".format(os.getpid()))
            self.__read_cache_dirs.append(os.path.normpath(
                os.path.join(self.imgdir, "download")))
        elif not self._incoming_cache_dir:
            # Only a global incoming cache exists for newer images.
            self._incoming_cache_dir = os.path.join(self.imgdir,
                "cache", "incoming-{0:d}".format(os.getpid()))

        # Test if we have the permissions to create the cache
        # incoming directory in this hierarchy.  If not, we'll need to
        # move it somewhere else.
        try:
            os.makedirs(self._incoming_cache_dir)
        except EnvironmentError as e:
            if e.errno == errno.EACCES or e.errno == errno.EROFS:
                # Fall back to a temporary, per-process download
                # area that is cleaned up at exit.
                self.__write_cache_dir = tempfile.mkdtemp(
                    prefix="download-{0:d}-".format(
                    os.getpid()))
                self._incoming_cache_dir = os.path.normpath(
                    os.path.join(self.__write_cache_dir,
                    "incoming-{0:d}".format(os.getpid())))
                self.__read_cache_dirs.append(
                    self.__write_cache_dir)
                # There's no image cleanup hook, so we'll just
                # remove this directory on process exit.
                atexit.register(shutil.rmtree,
                    self.__write_cache_dir, ignore_errors=True)
        else:
            # Creation succeeded: this was only a permission
            # probe, so remove the directory again.
            os.removedirs(self._incoming_cache_dir)

        # Forcibly discard image catalogs so they can be re-loaded
        # from the new location if they are already loaded.  This
        # also prevents scribbling on image state information in
        # the wrong location.
        self.__init_catalogs()

        # Upgrade the image's format if needed.
        self.update_format(allow_unprivileged=True,
            progtrack=progtrack)

        # If we haven't loaded the system publisher configuration, do
        # that now.
        if isinstance(self.cfg, imageconfig.ImageConfig):
            self.cfg = imageconfig.BlendedConfig(self.cfg,
                self.get_catalog(self.IMG_CATALOG_INSTALLED).\
                get_package_counts_by_pub(),
                self.imgdir, self.transport,
                self.cfg.get_policy("use-system-repo"))

        # Check to see if any system publishers have been changed.
        # If so they need to be refreshed, so clear last_refreshed.
        for p in self.cfg.modified_pubs:
            p.meta_root = self._get_publisher_meta_root(p.prefix)
            p.last_refreshed = None

        # Check to see if any system publishers have been
        # removed.  If they have, remove their metadata and
        # rebuild the catalogs.
        changed = False
        for p in self.cfg.removed_pubs:
            p.meta_root = self._get_publisher_meta_root(p.prefix)
            try:
                self.remove_publisher_metadata(p, rebuild=False)
                changed = True
            except apx.PermissionsException:
                pass
        if changed:
            self.__rebuild_image_catalogs()

        # we delay writing out any new system repository configuration
        # until we've updated the on-disk catalog state.  (otherwise we
        # could lose track of syspub publishers changes and either
        # return stale catalog information, or not do refreshes when
        # we need to.)
        self.cfg.write_sys_cfg()

        self.__load_publisher_ssl()
        if purge:
            # Configuration shouldn't be written again unless this
            # is an image creation operation (hence the purge).
            self.save_config()

        # Let the linked image subsystem know that root is moving
        self.linked._init_root()

        # load image avoid pkg set
        self.__avoid_set_load()
def update_format(self, allow_unprivileged=False, progtrack=None):
    """Transform the existing image structure and its data to
    the newest format.  Callers are responsible for locking.

    'allow_unprivileged' is an optional boolean indicating
    whether a fallback to an in-memory only upgrade should
    be performed if a PermissionsException is encountered
    during the operation.

    'progtrack' is an optional ProgressTracker object.

    Returns True if an upgrade was performed (possibly only
    in-memory), False if the image was already current."""

    if self.version == self.CURRENT_VERSION:
        # Already upgraded.
        self.__upgraded = True

        # If pre-upgrade data still exists; fire off a
        # process to dump it so execution can continue.
        orig_root = self.imgdir + ".old"
        if os.path.exists(orig_root):
            # Ensure all output is discarded; it really
            # doesn't matter if this succeeds.  Pass the
            # command as an argument list (no shell) so
            # that a path containing spaces or shell
            # metacharacters cannot break or subvert the
            # removal; close our copy of the devnull fd
            # after spawning (the child holds its own).
            with open(os.devnull, "w") as nullf:
                subprocess.Popen(["rm", "-rf", orig_root],
                    stdout=nullf, stderr=nullf)
        return False

    if not progtrack:
        progtrack = progress.NullProgressTracker()

    # Not technically 'caching', but close enough ...
    progtrack.cache_catalogs_start()

    # Upgrade catalog data if needed.
    self.__upgrade_catalogs()

    # Data conversion finished.
    self.__upgraded = True

    # Determine if on-disk portion of the upgrade is allowed.
    if self.allow_ondisk_upgrade is False:
        return True

    if self.allow_ondisk_upgrade is None and self.type != IMG_USER:
        if not self.is_liveroot() and not self.is_zone():
            # By default, don't update image format if it
            # is not the live root, and is not for a zone.
            self.allow_ondisk_upgrade = False
            return True

    # The logic to perform the on-disk upgrade is in its own
    # function so that it can easily be wrapped with locking logic.
    with self.locked_op("update-format",
        allow_unprivileged=allow_unprivileged):
        self.__upgrade_image_format(progtrack,
            allow_unprivileged=allow_unprivileged)

    progtrack.cache_catalogs_done()
    return True
def __upgrade_catalogs(self):
    """Private helper function for update_format.

    Converts pre-version-3 on-disk catalog and package state data
    into new in-memory image catalogs (known and installed), which
    are then cached on the image object."""

    if self.version >= 3:
        # Nothing to do.
        return

    def installed_file_publisher(filepath):
        """Find the pkg's installed file named by filepath.
        Return the publisher that installed this package."""

        # Use open() instead of the Python 2-only 'file'
        # builtin, and a context manager so the handle is
        # always closed; the old code leaked it whenever the
        # ValueError fallback below was taken.
        with open(filepath) as f:
            flines = f.readlines()
        try:
            version, pub = flines
            version = version.strip()
            pub = pub.strip()
        except ValueError:
            # If ValueError occurs, the installed file is of
            # a previous format.  For upgrades to work, it's
            # necessary to assume that the package was
            # installed from the highest ranked publisher.
            # Here, the publisher is setup to record that.
            if flines:
                pub = flines[0]
                pub = pub.strip()
                newpub = "{0}_{1}".format(
                    pkg.fmri.PREF_PUB_PFX, pub)
            else:
                newpub = "{0}_{1}".format(
                    pkg.fmri.PREF_PUB_PFX,
                    self.get_highest_ranked_publisher())
            pub = newpub
        assert pub
        return pub

    # First, load the old package state information.
    installed_state_dir = "{0}/state/installed".format(self.imgdir)

    # If the state directory structure has already been created,
    # loading information from it is fast.  The directory is
    # populated with files, named by their (url-encoded) FMRI,
    # which point to the "installed" file in the corresponding
    # directory under /var/pkg.
    installed = {}
    def add_installed_entry(f):
        # Record FMRI 'f' (tagged with its installing
        # publisher) in the installed map, keyed by stem.
        path = "{0}/pkg/{1}/installed".format(
            self.imgdir, f.get_dir_path())
        pub = installed_file_publisher(path)
        f.set_publisher(pub)
        installed[f.pkg_name] = f

    for pl in os.listdir(installed_state_dir):
        fmristr = "{0}".format(urllib.unquote(pl))
        f = pkg.fmri.PkgFmri(fmristr)
        add_installed_entry(f)

    # Create the new image catalogs.
    kcat = pkg.catalog.Catalog(batch_mode=True,
        manifest_cb=self._manifest_cb, sign=False)
    icat = pkg.catalog.Catalog(batch_mode=True,
        manifest_cb=self._manifest_cb, sign=False)

    # XXX For backwards compatibility, 'upgradability' of packages
    # is calculated and stored based on whether a given pkg stem
    # matches the newest version in the catalog.  This is quite
    # expensive (due to overhead), but at least the cost is
    # consolidated here.  This comparison is also cross-publisher,
    # as it used to be.
    newest = {}
    old_pub_cats = []
    for pub in self.gen_publishers():
        try:
            old_cat = pkg.server.catalog.ServerCatalog(
                pub.meta_root, read_only=True,
                publisher=pub.prefix)

            old_pub_cats.append((pub, old_cat))
            for f in old_cat.fmris():
                nver = newest.get(f.pkg_name, None)
                newest[f.pkg_name] = max(nver,
                    f.version)

        except EnvironmentError as e:
            # If a catalog file is just missing, ignore it.
            # If there's a worse error, make sure the user
            # knows about it.
            if e.errno != errno.ENOENT:
                raise

    # Next, load the existing catalog data and convert it.
    pub_cats = []
    for pub, old_cat in old_pub_cats:
        new_cat = pub.catalog
        new_cat.batch_mode = True
        new_cat.sign = False
        if new_cat.exists:
            new_cat.destroy()

        # First convert the old publisher catalog to
        # the new format.
        for f in old_cat.fmris():
            new_cat.add_package(f)

            # Now populate the image catalogs.
            states = [pkgdefs.PKG_STATE_KNOWN,
                pkgdefs.PKG_STATE_V0]
            mdata = { "states": states }
            if f.version != newest[f.pkg_name]:
                states.append(
                    pkgdefs.PKG_STATE_UPGRADABLE)

            inst_fmri = installed.get(f.pkg_name, None)
            if inst_fmri and \
                inst_fmri.version == f.version and \
                pkg.fmri.is_same_publisher(f.publisher,
                inst_fmri.publisher):
                states.append(
                    pkgdefs.PKG_STATE_INSTALLED)
                if inst_fmri.preferred_publisher():
                    # Strip the PREF_PUB_PFX.
                    inst_fmri.set_publisher(
                        inst_fmri.get_publisher())
                icat.add_package(f, metadata=mdata)
                del installed[f.pkg_name]
            kcat.add_package(f, metadata=mdata)

        # Normally, the Catalog's attributes are automatically
        # populated as a result of catalog operations.  But in
        # this case, the new Catalog's attributes should match
        # those of the old catalog.
        old_lm = old_cat.last_modified()
        if old_lm:
            # Can be None for empty v0 catalogs.
            old_lm = pkg.catalog.ts_to_datetime(old_lm)
        new_cat.last_modified = old_lm
        new_cat.version = 0

        # Add to the list of catalogs to save.
        new_cat.batch_mode = False
        pub_cats.append(new_cat)

    # Discard the old catalog objects.
    old_pub_cats = None

    for f in installed.values():
        # Any remaining FMRIs need to be added to all of the
        # image catalogs.
        states = [pkgdefs.PKG_STATE_INSTALLED,
            pkgdefs.PKG_STATE_V0]
        mdata = { "states": states }
        # This package may be installed from a publisher that
        # is no longer known or has been disabled.
        if f.pkg_name in newest and \
            f.version != newest[f.pkg_name]:
            states.append(pkgdefs.PKG_STATE_UPGRADABLE)

        if f.preferred_publisher():
            # Strip the PREF_PUB_PFX.
            f.set_publisher(f.get_publisher())

        icat.add_package(f, metadata=mdata)
        kcat.add_package(f, metadata=mdata)

    for cat in pub_cats + [kcat, icat]:
        cat.finalize()

    # Cache converted catalogs so that operations can function as
    # expected if the on-disk format of the catalogs isn't upgraded.
    self.__catalogs[self.IMG_CATALOG_KNOWN] = kcat
    self.__catalogs[self.IMG_CATALOG_INSTALLED] = icat
def __upgrade_image_format(self, progtrack, allow_unprivileged=False):
    """Private helper function for update_format."""

    try:
        # Ensure Image directory structure is valid.
        self.mkdirs()
    except apx.PermissionsException as e:
        if not allow_unprivileged:
            raise
        # An unprivileged user is attempting to use the
        # new client with an old image.  Since none of
        # the changes can be saved, warn the user and
        # then return.

        # Raising an exception here would be a decidedly
        # bad thing as it would disrupt find_root, etc.
        return

    # This has to be done after the permissions check above.
    # First, create a new temporary root to store the converted
    # image metadata.
    tmp_root = self.imgdir + ".new"
    try:
        shutil.rmtree(tmp_root)
    except EnvironmentError as e:
        if e.errno in (errno.EROFS, errno.EPERM) and \
            allow_unprivileged:
            # Bail.
            return
        if e.errno != errno.ENOENT:
            raise apx._convert_error(e)

    try:
        self.mkdirs(root=tmp_root, version=self.CURRENT_VERSION)
    except apx.PermissionsException as e:
        # Same handling needed as above; but not after this.
        if not allow_unprivileged:
            raise
        return

    def linktree(src_root, dest_root):
        # Recursively hard-link every file under src_root into
        # dest_root, recreating the directory structure.
        if not os.path.exists(src_root):
            # Nothing to do.
            return

        for entry in os.listdir(src_root):
            src = os.path.join(src_root, entry)
            dest = os.path.join(dest_root, entry)
            if os.path.isdir(src):
                # Recurse into directory to link
                # its contents.
                misc.makedirs(dest)
                linktree(src, dest)
                continue
            # Link source file into target dest.
            assert os.path.isfile(src)
            try:
                os.link(src, dest)
            except EnvironmentError as e:
                raise apx._convert_error(e)

    # Next, link history data into place.
    linktree(self.history.path, os.path.join(tmp_root,
        "history"))

    # Next, link index data into place.
    linktree(self.index_dir, os.path.join(tmp_root,
        "cache", "index"))

    # Next, link ssl data into place.
    linktree(os.path.join(self.imgdir, "ssl"),
        os.path.join(tmp_root, "ssl"))

    # Next, write state data into place.
    if self.version < 3:
        # Image state and publisher metadata
        tmp_state_root = os.path.join(tmp_root, "state")

        # Update image catalog locations.
        kcat = self.get_catalog(self.IMG_CATALOG_KNOWN)
        icat = self.get_catalog(self.IMG_CATALOG_INSTALLED)
        kcat.meta_root = os.path.join(tmp_state_root,
            self.IMG_CATALOG_KNOWN)
        icat.meta_root = os.path.join(tmp_state_root,
            self.IMG_CATALOG_INSTALLED)

        # Assume that since mkdirs succeeded that the remaining
        # data can be saved and the image structure can be
        # upgraded.  But first, attempt to save the image
        # catalogs.
        for cat in icat, kcat:
            misc.makedirs(cat.meta_root)
            cat.save()
    else:
        # For version 3 and newer images, just link existing
        # state information into place.
        linktree(self._statedir, os.path.join(tmp_root,
            "state"))

    # Reset each publisher's meta_root and ensure its complete
    # directory structure is intact.  Then either link in or
    # write out the metadata for each publisher.
    for pub in self.gen_publishers():
        # NOTE(review): 'old_root' is saved but never used
        # below — confirm whether it can be dropped.
        old_root = pub.meta_root
        old_cat_root = pub.catalog_root
        old_cert_root = pub.cert_root
        pub.meta_root = os.path.join(tmp_root,
            IMG_PUB_DIR, pub.prefix)
        pub.create_meta_root()

        if self.version < 3:
            # Should be loaded in memory and transformed
            # already, so just need to be written out.
            pub.catalog.save()
            continue

        # Now link any catalog or cert files from the old root
        # into the new root.
        linktree(old_cat_root, pub.catalog_root)
        linktree(old_cert_root, pub.cert_root)

        # Finally, create a directory for the publisher's
        # manifests to live in.
        misc.makedirs(os.path.join(pub.meta_root, "pkg"))

    # Next, link licenses and manifests of installed packages into
    # new image dir.
    for pfmri in self.gen_installed_pkgs():
        # Link licenses.
        mdir = self.get_manifest_dir(pfmri)
        for entry in os.listdir(mdir):
            if not entry.startswith("license."):
                continue
            src = os.path.join(mdir, entry)
            if os.path.isdir(src):
                # Ignore broken licenses.
                continue

            # For conversion, ensure destination link uses
            # encoded license name to match how new image
            # format stores licenses.
            dest = os.path.join(tmp_root, "license",
                pfmri.get_dir_path(stemonly=True),
                urllib.quote(entry, ""))
            misc.makedirs(os.path.dirname(dest))
            try:
                os.link(src, dest)
            except EnvironmentError as e:
                raise apx._convert_error(e)

        # Link manifest.
        src = self.get_manifest_path(pfmri)
        dest = os.path.join(tmp_root, "publisher",
            pfmri.publisher, "pkg", pfmri.get_dir_path())
        misc.makedirs(os.path.dirname(dest))
        try:
            os.link(src, dest)
        except EnvironmentError as e:
            raise apx._convert_error(e)

    # Next, copy the old configuration into the new location using
    # the new name.  The configuration is copied instead of being
    # linked so that any changes to configuration as a result of
    # the upgrade won't be written into the old image directory.
    src = os.path.join(self.imgdir, "disabled_auth")
    if os.path.exists(src):
        dest = os.path.join(tmp_root, "disabled_auth")
        portable.copyfile(src, dest)

    src = self.cfg.target
    dest = os.path.join(tmp_root, "pkg5.image")
    try:
        portable.copyfile(src, dest)
    except EnvironmentError as e:
        raise apx._convert_error(e)

    # Update the new configuration's version information and then
    # write it out again.
    newcfg = imageconfig.ImageConfig(dest, tmp_root,
        version=3, overrides={ "image": {
        "version": self.CURRENT_VERSION } })
    newcfg._version = 3
    newcfg.write()

    # Now reload configuration and write again to configuration data
    # reflects updated version information.
    newcfg.reset()
    newcfg.write()

    # Finally, rename the old package metadata directory, then
    # rename the new one into place, and then reinitialize.  The
    # old data will be dumped during initialization.
    orig_root = self.imgdir + ".old"
    try:
        portable.rename(self.imgdir, orig_root)
        portable.rename(tmp_root, self.imgdir)

        # /var/pkg/repo is renamed into place instead of being
        # linked piece-by-piece for performance reasons.
        # Crawling the entire tree structure of a repository is
        # far slower than simply renaming the top level
        # directory (since it often has thousands or millions
        # of objects).
        old_repo = os.path.join(orig_root, "repo")
        if os.path.exists(old_repo):
            new_repo = os.path.join(tmp_root, "repo")
            portable.rename(old_repo, new_repo)
    except EnvironmentError as e:
        raise apx._convert_error(e)
    self.find_root(self.root, exact_match=True, progtrack=progtrack)
def create(self, pubs, facets=EmptyDict, is_zone=False, progtrack=None,
    props=EmptyDict, refresh_allowed=True, variants=EmptyDict):
    """Creates a new image with the given attributes if it does not
    exist; should not be used with an existing image.

    'is_zone' is a boolean indicating whether the image is a zone.

    'pubs' is a list of Publisher objects to configure the image
    with.

    'refresh_allowed' is an optional boolean indicating that
    network operations (such as publisher data retrieval) are
    allowed.

    'progtrack' is an optional ProgressTracker object.

    'props' is an option dictionary mapping image property names to
    values.

    'variants' is an optional dictionary of variant names and
    values.

    'facets' is an optional dictionary of facet names and values.
    """

    # Attach each publisher to this image's metadata location and
    # transport before any configuration happens.
    for p in pubs:
        p.meta_root = self._get_publisher_meta_root(p.prefix)
        p.transport = self.transport

    # Override any initial configuration information.
    self.set_properties(props)

    # Start the operation.
    self.history.log_operation_start("image-create")

    # Determine and add the default variants for the image.
    if is_zone:
        self.cfg.variants["variant.opensolaris.zone"] = \
            "nonglobal"
    else:
        self.cfg.variants["variant.opensolaris.zone"] = \
            "global"

    self.cfg.variants["variant.arch"] = \
        variants.get("variant.arch", platform.processor())

    # After setting up the default variants, add any overrides or
    # additional variants or facets specified.
    self.cfg.variants.update(variants)
    self.cfg.facets.update(facets)

    # Now everything is ready for publisher configuration.
    # Since multiple publishers are allowed, they are all
    # added at once without any publisher data retrieval.
    # A single retrieval is then performed afterwards, if
    # allowed, to minimize the amount of work the client
    # needs to perform.
    for p in pubs:
        self.add_publisher(p, refresh_allowed=False,
            progtrack=progtrack)

    if refresh_allowed:
        self.refresh_publishers(progtrack=progtrack,
            full_refresh=True)
    else:
        # initialize empty catalogs on disk
        self.__rebuild_image_catalogs(progtrack=progtrack)

    self.cfg.set_property("property", "publisher-search-order",
        [p.prefix for p in pubs])

    # Ensure publisher search order is written.
    self.save_config()

    self.history.log_operation_end()
@staticmethod
def __allow_liveroot():
    """Check if we're allowed to access the current live root
    image."""

    env = os.environ

    # A simulated live root (or an explicit environment
    # override) always grants access.
    if DebugValues.get_value("simulate_live_root") or \
        "PKG_LIVE_ROOT" in env:
        return True

    # Access may be explicitly disabled via debug value or
    # environment; otherwise it is allowed by default.
    denied = bool(DebugValues.get_value("simulate_no_live_root")) \
        or "PKG_NO_LIVE_ROOT" in env
    return not denied
def is_liveroot(self):
    """Return True if this image's root is the live system root."""
    live_root = misc.liveroot()
    return bool(self.root == live_root)
def is_zone(self):
    """Return True if this image is configured as a non-global
    zone image."""
    zone_variant = self.cfg.variants["variant.opensolaris.zone"]
    return zone_variant == "nonglobal"
def get_arch(self):
    """Return the image's architecture variant value (e.g. 'i386')."""
    variants = self.cfg.variants
    return variants["variant.arch"]
def has_boot_archive(self):
    """Returns True if a boot_archive is present in this image"""

    # Use the cached answer when one has been computed.
    cached = self.__boot_archive
    if cached is not None:
        return cached

    candidates = (
        "platform/i86pc/amd64/boot_archive",
        "platform/i86pc/boot_archive",
        "platform/sun4u/boot_archive",
        "platform/sun4v/boot_archive",
    )
    found = any(
        os.path.isfile(os.path.join(self.root, rel))
        for rel in candidates)
    self.__boot_archive = found
    return found
def get_ramdisk_filelist(self):
    """return the filelist... add the filelist so we rebuild
    boot archive if it changes... append trailing / to
    directories that are really there"""

    p = "boot/solaris/filelist.ramdisk"
    f = os.path.join(self.root, p)

    def addslash(path):
        # Mark directory entries with a trailing slash so a
        # later change of type is also detected.
        if os.path.isdir(os.path.join(self.root, path)):
            return path + "/"
        return path

    if not os.path.isfile(f):
        return []

    # Use open() with a context manager instead of the Python 2
    # 'file' builtin; the old code never closed the file handle.
    with open(f) as flist:
        return [addslash(l.strip()) for l in flist] + [p]
def get_cachedirs(self):
    """Returns a list of tuples of the form (dir, readonly, pub,
    layout) where 'dir' is the absolute path of the cache directory,
    'readonly' is a boolean indicating whether the cache can
    be written to, 'pub' is the prefix of the publisher that
    the cache directory should be used for, and 'layout' is a
    FileManager object used to access file content in the cache.
    If 'pub' is None, the cache directory is intended for all
    publishers.  If 'layout' is None, file content layout can
    vary."""

    file_layout = None
    if self.version >= 4:
        # Assume cache directories are in V1 Layout if image
        # format is v4+.
        file_layout = fl.V1Layout()

    # Get all readonly cache directories.
    cdirs = [
        (cdir, True, None, file_layout)
        for cdir in self.__read_cache_dirs
    ]

    # Get global write cache directory.
    if self.__write_cache_dir:
        cdirs.append((self.__write_cache_dir, False, None,
            file_layout))

    # For images newer than version 3, file data can be stored
    # in the publisher's file root.
    if self.version == self.CURRENT_VERSION:
        for pub in self.gen_publishers(inc_disabled=True):
            froot = os.path.join(pub.meta_root, "file")
            readonly = False
            # A dedicated write cache makes the publisher
            # file root read-only.
            if self.__write_cache_dir or \
                self.__write_cache_root:
                readonly = True
            cdirs.append((froot, readonly, pub.prefix,
                file_layout))

            if self.__write_cache_root:
                # Cache is a tree structure like
                # /var/pkg/publisher.
                froot = os.path.join(
                    self.__write_cache_root, pub.prefix,
                    "file")
                cdirs.append((froot, False, pub.prefix,
                    file_layout))

    return cdirs
def get_root(self):
    """Return the absolute path of the image's root directory."""
    return self.root
def get_last_modified(self, string=False):
    """Return the UTC time of the image's last state change or
    None if unknown.  By default the time is returned via datetime
    object.  If 'string' is true and a time is available, then the
    time is returned as a string (instead of as a datetime
    object)."""

    # The known catalog's own timestamp is authoritative: it is
    # accurate to the microsecond, unlike filesystem timestamps
    # whose resolution is OS-specific.
    lm = self.__get_catalog(self.IMG_CATALOG_KNOWN).last_modified
    if lm is not None and string:
        return lm.strftime("%Y-%m-%dT%H:%M:%S.%f")
    return lm
def gen_publishers(self, inc_disabled=False):
    """Generator yielding the image's configured publishers (and,
    when alternate package sources are set, their publishers) with
    meta_root and transport refreshed to current image state.

    'inc_disabled' is an optional boolean; when True, disabled
    publishers are yielded as well."""

    if not self.cfg:
        raise apx.ImageCfgEmptyError(self.root)

    alt_pubs = {}
    if self.__alt_pkg_pub_map:
        alt_src_pubs = dict(
            (p.prefix, p)
            for p in self.__alt_pubs
        )

        for pfx in self.__alt_known_cat.publishers():
            # Include alternate package source publishers
            # in result, and temporarily enable any
            # disabled publishers that already exist in
            # the image configuration.
            try:
                img_pub = self.cfg.publishers[pfx]

                if not img_pub.disabled:
                    # No override needed.
                    continue
                new_pub = copy.copy(img_pub)
                new_pub.disabled = False

                # Discard origins and mirrors to prevent
                # their accidental use.
                repo = new_pub.repository
                repo.reset_origins()
                repo.reset_mirrors()
            except KeyError:
                new_pub = alt_src_pubs[pfx]

            alt_pubs[pfx] = new_pub

    publishers = [
        alt_pubs.get(p.prefix, p)
        for p in self.cfg.publishers.values()
    ]
    publishers.extend((
        p for p in alt_pubs.values()
        if p not in publishers
    ))

    for pub in publishers:
        # Prepare publishers for transport usage; this must be
        # done each time so that information reflects current
        # image state.  This is done whether or not the
        # publisher is returned so that in-memory state is
        # always current.
        pub.meta_root = self._get_publisher_meta_root(
            pub.prefix)
        pub.transport = self.transport
        if inc_disabled or not pub.disabled:
            yield pub
def get_publisher_ranks(self):
    """Return dictionary of configured + enabled publishers and
    unconfigured publishers which still have packages installed.

    Each entry contains a tuple of search order index starting at
    0, and a boolean indicating whether or not this publisher is
    "sticky", and a boolean indicating whether or not the
    publisher is enabled"""

    ranks = {
        p.prefix: (rank, p.sticky, True)
        for rank, p in enumerate(
            self.get_sorted_publishers(inc_disabled=False))
    }

    # Publishers that still own installed packages but are no
    # longer configured are appended as implicitly non-sticky
    # and disabled.
    for prefix in self.get_installed_pubs():
        ranks.setdefault(prefix, (len(ranks), False, False))
    return ranks
def get_highest_ranked_publisher(self):
    """Return the highest ranked publisher."""

    search_order = self.cfg.get_property("property",
        "publisher-search-order")
    if search_order:
        return self.get_publisher(prefix=search_order[0])

    # No explicit ranking; fall back to the first configured
    # publisher, then to any publisher that still has
    # installed packages, then to None.
    first = next(iter(self.gen_publishers()), None)
    if first is not None:
        return first
    installed = next(iter(self.get_installed_pubs()), None)
    if installed is not None:
        return publisher.Publisher(installed)
    return None
def check_cert_validity(self, pubs=EmptyI):
    """Validate the certificates of the specified publishers.

    Raise an exception if any of the certificates has expired or
    is close to expiring."""

    if not pubs:
        pubs = self.gen_publishers()

    expired = []
    for pub in pubs:
        repo = pub.repository
        for origin in repo.origins:
            if origin.ssl_cert:
                try:
                    misc.validate_ssl_cert(
                        origin.ssl_cert,
                        prefix=pub.prefix,
                        uri=origin)
                except apx.ExpiredCertificate as e:
                    # Collect so that every expired
                    # certificate is reported at once.
                    expired.append(e)

            if origin.ssl_key:
                try:
                    if not os.path.exists(
                        origin.ssl_key):
                        raise apx.NoSuchKey(
                            origin.ssl_key,
                            publisher=pub,
                            uri=origin)
                except EnvironmentError as e:
                    raise apx._convert_error(e)

    if expired:
        raise apx.ExpiredCertificates(expired)
def has_publisher(self, prefix=None, alias=None):
    """Returns a boolean value indicating whether a publisher
    exists in the image configuration that matches the given
    prefix or alias."""
    return any(
        prefix == pub.prefix or (alias and alias == pub.alias)
        for pub in self.gen_publishers(inc_disabled=True))
def remove_publisher(self, prefix=None, alias=None, progtrack=None):
    """Removes the publisher with the matching identity from the
    image."""

    if not progtrack:
        progtrack = progress.NullProgressTracker()

    with self.locked_op("remove-publisher"):
        # Resolve the identity first so an unknown publisher
        # fails before any configuration is modified.
        pub = self.get_publisher(prefix=prefix, alias=alias)
        self.cfg.remove_publisher(pub.prefix)
        self.remove_publisher_metadata(pub, progtrack=progtrack)
        self.save_config()
def get_publishers(self, inc_disabled=True):
    """Return a dictionary of configured publishers.  This doesn't
    include unconfigured publishers which still have packages
    installed."""
    return {
        pub.prefix: pub
        for pub in self.gen_publishers(inc_disabled=inc_disabled)
    }
def get_sorted_publishers(self, inc_disabled=True):
    """Return a list of configured publishers sorted by rank.
    This doesn't include unconfigured publishers which still have
    packages installed."""

    by_prefix = self.get_publishers(inc_disabled=inc_disabled)
    order = self.cfg.get_property("property",
        "publisher-search-order")

    # Hand-edited config files or sysrepo publisher churn can
    # leave configured publishers without a rank; sort those
    # alphabetically after the ranked ones.
    ranked = [by_prefix[n] for n in order if n in by_prefix]
    unranked = [by_prefix[n]
        for n in sorted(set(by_prefix) - set(order))]
    return ranked + unranked
def get_publisher(self, prefix=None, alias=None, origin=None):
    """Return the publisher matching the given prefix, alias, or
    origin URI; raises UnknownPublisher if none matches."""
    for pub in self.gen_publishers(inc_disabled=True):
        if prefix and prefix == pub.prefix:
            return pub
        if alias and alias == pub.alias:
            return pub
        if origin and pub.repository and \
            pub.repository.has_origin(origin):
            return pub
    raise apx.UnknownPublisher(max(prefix, alias, origin))
def pub_search_before(self, being_moved, staying_put):
    """Moves publisher "being_moved" to before "staying_put"
    in search order.

    The caller is responsible for locking the image."""
    self.cfg.change_publisher_search_order(
        being_moved, staying_put, after=False)
def pub_search_after(self, being_moved, staying_put):
    """Moves publisher "being_moved" to after "staying_put"
    in search order.

    The caller is responsible for locking the image."""
    self.cfg.change_publisher_search_order(
        being_moved, staying_put, after=True)
def __apply_alt_pkg_sources(self, img_kcat):
    """Private helper; temporarily merges metadata from configured
    alternate package sources into 'img_kcat' (the image's known
    catalog) and points the transport at the alternate
    publishers."""
    pkg_pub_map = self.__alt_pkg_pub_map
    if not pkg_pub_map or self.__alt_pkg_sources_loaded:
        # No alternate sources to merge.
        return

    # Temporarily merge the package metadata in the alternate
    # known package catalog for packages not listed in the
    # image's known catalog.
    def merge_check(alt_kcat, pfmri, new_entry):
        # Callback for Catalog.append(): returns (add, entry)
        # deciding whether each alternate entry is merged.
        states = new_entry["metadata"]["states"]
        if pkgdefs.PKG_STATE_INSTALLED in states:
            # Not interesting; already installed.
            return False, None
        img_entry = img_kcat.get_entry(pfmri=pfmri)
        if not img_entry is None:
            # Already in image known catalog.
            return False, None
        return True, new_entry

    img_kcat.append(self.__alt_known_cat, cb=merge_check)
    img_kcat.finalize()

    self.__alt_pkg_sources_loaded = True
    self.transport.cfg.pkg_pub_map = self.__alt_pkg_pub_map
    self.transport.cfg.alt_pubs = self.__alt_pubs
    self.transport.cfg.reset_caches()
def __cleanup_alt_pkg_certs(self):
    """Private helper function to cleanup package certificate
    information after use of temporary package data."""

    if not self.__alt_pubs:
        return

    # Cleanup publisher cert information; any certs not retrieved
    # during temporary publisher use need to be expunged from the
    # image configuration.
    for pub in self.__alt_pubs:
        try:
            ipub = self.cfg.publishers[pub.prefix]
        except KeyError:
            # Nothing to do.
            continue
        # NOTE(review): 'ipub' is looked up but never used; the
        # expunge step described above appears to be missing —
        # confirm intended behavior.
def set_alt_pkg_sources(self, alt_sources):
    """Specifies an alternate source of package metadata to be
    temporarily merged with image state so that it can be used
    as part of packaging operations."""

    if not alt_sources:
        # Clearing: discard any merged catalog state and reset
        # the transport back to configured publishers.
        self.__init_catalogs()
        self.__alt_pkg_pub_map = None
        self.__alt_pubs = None
        self.__alt_known_cat = None
        self.__alt_pkg_sources_loaded = False
        self.transport.cfg.pkg_pub_map = None
        self.transport.cfg.alt_pubs = None
        self.transport.cfg.reset_caches()
        return
    elif self.__alt_pkg_sources_loaded:
        # Ensure existing alternate package source data
        # is not part of temporary image state.
        self.__init_catalogs()

    # 'ignored' slot is kept for tuple-shape compatibility.
    pkg_pub_map, alt_pubs, alt_kcat, ignored = alt_sources
    self.__alt_pkg_pub_map = pkg_pub_map
    self.__alt_pubs = alt_pubs
    self.__alt_known_cat = alt_kcat
def set_highest_ranked_publisher(self, prefix=None, alias=None,
    pub=None):
    """Sets the preferred publisher for packaging operations.

    'prefix' is an optional string value specifying the name of
    a publisher; ignored if 'pub' is provided.

    'alias' is an optional string value specifying the alias of
    a publisher; ignored if 'pub' is provided.

    'pub' is an optional Publisher object identifying the
    publisher to set as the preferred publisher.

    One of the above parameters must be provided.

    The caller is responsible for locking the image."""

    if not pub:
        pub = self.get_publisher(prefix=prefix, alias=alias)
    if not self.cfg.allowed_to_move(pub):
        raise apx.ModifyingSyspubException(_("Publisher '{0}' "
            "is a system publisher and cannot be "
            "moved.").format(pub))

    pubs = self.get_sorted_publishers()
    relative = None
    for p in pubs:
        # If we've gotten to the publisher we want to make
        # highest ranked, then there's nothing to do because
        # it's already as high as it can be.
        if p == pub:
            return
        if self.cfg.allowed_to_move(p):
            relative = p
            break
    # Bug fix: the old message concatenated an unformatted
    # literal with .format() applied only to its second half and
    # referenced the undefined name 'ranks', so a failing assert
    # raised NameError instead of a useful message.
    assert relative, ("Expected {0} to already be part of the "
        "search order: {1}".format(pub, pubs))
    self.cfg.change_publisher_search_order(pub.prefix,
        relative.prefix, after=False)
2008 def set_property(self, prop_name, prop_value):
2009 with self.locked_op("set-property"):
2010 self.cfg.set_property("property", prop_name,
2011 prop_value)
2012 self.save_config()
2014 def set_properties(self, properties):
2015 properties = { "property": properties }
2016 with self.locked_op("set-property"):
2017 self.cfg.set_properties(properties)
2018 self.save_config()
2020 def get_property(self, prop_name):
2021 return self.cfg.get_property("property", prop_name)
2023 def has_property(self, prop_name):
2024 try:
2025 self.cfg.get_property("property", prop_name)
2026 return True
2027 except cfg.ConfigError:
2028 return False
2030 def delete_property(self, prop_name):
2031 with self.locked_op("unset-property"):
2032 self.cfg.remove_property("property", prop_name)
2033 self.save_config()
2035 def add_property_value(self, prop_name, prop_value):
2036 with self.locked_op("add-property-value"):
2037 self.cfg.add_property_value("property", prop_name,
2038 prop_value)
2039 self.save_config()
2041 def remove_property_value(self, prop_name, prop_value):
2042 with self.locked_op("remove-property-value"):
2043 self.cfg.remove_property_value("property", prop_name,
2044 prop_value)
2045 self.save_config()
2047 def destroy(self):
2048 """Destroys the image; image object should not be used
2049 afterwards."""
2051 if not self.imgdir or not os.path.exists(self.imgdir):
2052 return
2054 if os.path.abspath(self.imgdir) == "/":
2055 # Paranoia.
2056 return
2058 try:
2059 shutil.rmtree(self.imgdir)
2060 except EnvironmentError as e:
2061 raise apx._convert_error(e)
2063 def properties(self):
2064 if not self.cfg:
2065 raise apx.ImageCfgEmptyError(self.root)
2066 return self.cfg.get_index()["property"].keys()
2068 def add_publisher(self, pub, refresh_allowed=True, progtrack=None,
2069 approved_cas=EmptyI, revoked_cas=EmptyI, search_after=None,
2070 search_before=None, search_first=None, unset_cas=EmptyI):
2071 """Adds the provided publisher object to the image
2072 configuration.
2074 'refresh_allowed' is an optional, boolean value indicating
2075 whether the publisher's metadata should be retrieved when adding
2076 it to the image's configuration.
2078 'progtrack' is an optional ProgressTracker object."""
2080 with self.locked_op("add-publisher"):
2081 return self.__add_publisher(pub,
2082 refresh_allowed=refresh_allowed,
2083 progtrack=progtrack, approved_cas=EmptyI,
2084 revoked_cas=EmptyI, search_after=search_after,
2085 search_before=search_before,
2086 search_first=search_first, unset_cas=EmptyI)
        def __update_publisher_catalogs(self, pub, progtrack=None,
            refresh_allowed=True):
                """Retrieve and validate repository metadata for the newly
                added publisher 'pub'; on failure, back the publisher out of
                the image configuration again (unless it is a system
                publisher) and re-raise.

                'progtrack' is an optional ProgressTracker object.

                'refresh_allowed' controls whether metadata may actually be
                fetched from the publisher's configured origins."""

                # Ensure that if the publisher's meta directory already
                # exists for some reason that the data within is not
                # used.
                self.remove_publisher_metadata(pub, progtrack=progtrack,
                    rebuild=False)

                repo = pub.repository
                if refresh_allowed and repo.origins:
                        try:
                                # First, verify that the publisher has a
                                # valid pkg(5) repository.
                                self.transport.valid_publisher_test(pub)
                                pub.validate_config()
                                self.refresh_publishers(pubs=[pub],
                                    progtrack=progtrack)
                        except Exception as e:
                                # Remove the newly added publisher since
                                # it is invalid or the retrieval failed.
                                if not pub.sys_pub:
                                        self.cfg.remove_publisher(pub.prefix)
                                raise
                        except:
                                # Bare except mirrors the clause above so the
                                # same cleanup also runs for BaseException
                                # (e.g. KeyboardInterrupt) before re-raising.
                                if not pub.sys_pub:
                                        self.cfg.remove_publisher(pub.prefix)
                                raise
2118 def __add_publisher(self, pub, refresh_allowed=True, progtrack=None,
2119 approved_cas=EmptyI, revoked_cas=EmptyI, search_after=None,
2120 search_before=None, search_first=None, unset_cas=EmptyI):
2121 """Private version of add_publisher(); caller is responsible
2122 for locking."""
2124 assert (not search_after and not search_before) or \
2125 (not search_after and not search_first) or \
2126 (not search_before and not search_first)
2128 if self.version < self.CURRENT_VERSION:
2129 raise apx.ImageFormatUpdateNeeded(self.root)
2131 for p in self.cfg.publishers.values():
2132 if pub.prefix == p.prefix or \
2133 pub.prefix == p.alias or \
2134 pub.alias and (pub.alias == p.alias or
2135 pub.alias == p.prefix):
2136 raise apx.DuplicatePublisher(pub)
2138 if not progtrack:
2139 progtrack = progress.NullProgressTracker()
2141 # Must assign this first before performing operations.
2142 pub.meta_root = self._get_publisher_meta_root(
2143 pub.prefix)
2144 pub.transport = self.transport
2146 # Before continuing, validate SSL information.
2147 try:
2148 self.check_cert_validity(pubs=[pub])
2149 except apx.ExpiringCertificate as e:
2150 logger.error(str(e))
2152 self.cfg.publishers[pub.prefix] = pub
2154 self.__update_publisher_catalogs(pub, progtrack=progtrack,
2155 refresh_allowed=refresh_allowed)
2157 for ca in approved_cas:
2158 try:
2159 ca = os.path.abspath(ca)
2160 fh = open(ca, "rb")
2161 s = fh.read()
2162 fh.close()
2163 except EnvironmentError as e:
2164 if e.errno == errno.ENOENT:
2165 raise apx.MissingFileArgumentException(
2167 raise apx._convert_error(e)
2168 pub.approve_ca_cert(s, manual=True)
2170 for hsh in revoked_cas:
2171 pub.revoke_ca_cert(hsh)
2173 for hsh in unset_cas:
2174 pub.unset_ca_cert(hsh)
2176 if search_first:
2177 self.set_highest_ranked_publisher(prefix=pub.prefix)
2178 elif search_before:
2179 self.pub_search_before(pub.prefix, search_before)
2180 elif search_after:
2181 self.pub_search_after(pub.prefix, search_after)
2183 # Only after success should the configuration be saved.
2184 self.save_config()
        def verify(self, fmri, progresstracker, **kwargs):
                """Generator that returns a tuple of the form (action, errors,
                warnings, info) if there are any error, warning, or other
                messages about an action contained within the specified
                package. Where the returned messages are lists of strings
                indicating fatal problems, potential issues (that can be
                ignored), or extra information to be displayed respectively.

                'fmri' is the fmri of the package to verify.

                'progresstracker' is a ProgressTracker object.

                'kwargs' is a dict of additional keyword arguments to be passed
                to each action verification routine."""

                try:
                        pub = self.get_publisher(prefix=fmri.publisher)
                except apx.UnknownPublisher:
                        # Since user removed publisher, assume this is the same
                        # as if they had set signature-policy ignore for the
                        # publisher.
                        sig_pol = None
                else:
                        sig_pol = self.signature_policy.combine(
                            pub.signature_policy)

                progresstracker.plan_add_progress(
                    progresstracker.PLAN_PKG_VERIFY)
                manf = self.get_manifest(fmri, ignore_excludes=True)
                sigs = list(manf.gen_actions_by_type("signature",
                    excludes=self.list_excludes()))
                if sig_pol and (sigs or sig_pol.name != "ignore"):
                        # Only perform signature verification logic if there are
                        # signatures or if signature-policy is not 'ignore'.
                        try:
                                # Signature verification must be done using all
                                # the actions from the manifest, not just the
                                # ones for this image's variants.
                                sig_pol.process_signatures(sigs,
                                    manf.gen_actions(), pub, self.trust_anchors,
                                    self.cfg.get_policy(
                                        "check-certificate-revocation"))
                        except apx.SigningException as e:
                                # Signature failures are reported against the
                                # offending signature action, not raised.
                                e.pfmri = fmri
                                yield e.sig, [e], [], []
                        except apx.InvalidResourceLocation as e:
                                yield None, [e], [], []

                progresstracker.plan_add_progress(
                    progresstracker.PLAN_PKG_VERIFY, nitems=0)

                def mediation_allowed(act):
                        """Helper function to determine if the mediation
                        delivered by a link is allowed. If it is, then
                        the link should be verified. (Yes, this does mean
                        that the non-existence of links is not verified.)"""

                        mediator = act.attrs.get("mediator")
                        if not mediator or mediator not in self.cfg.mediators:
                                # Link isn't mediated or mediation is unknown.
                                return True

                        cfg_med_version = self.cfg.mediators[mediator].get(
                            "version")
                        cfg_med_impl = self.cfg.mediators[mediator].get(
                            "implementation")

                        med_version = act.attrs.get("mediator-version")
                        if med_version:
                                med_version = pkg.version.Version(
                                    med_version)
                        med_impl = act.attrs.get("mediator-implementation")

                        return med_version == cfg_med_version and \
                            med.mediator_impl_matches(med_impl, cfg_med_impl)

                # pkg verify only looks at actions that have not been dehydrated.
                excludes = self.list_excludes()
                vardrate_excludes = [self.cfg.variants.allow_action]
                dehydrate = self.cfg.get_property("property", "dehydrated")
                if dehydrate:
                        func = self.get_dehydrated_exclude_func(dehydrate)
                        excludes.append(func)
                        vardrate_excludes.append(func)

                for act in manf.gen_actions():
                        progresstracker.plan_add_progress(
                            progresstracker.PLAN_PKG_VERIFY, nitems=0)
                        if (act.name == "link" or
                            act.name == "hardlink") and \
                            not mediation_allowed(act):
                                # Link doesn't match configured
                                # mediation, so shouldn't be verified.
                                continue

                        errors = []
                        warnings = []
                        info = []
                        if act.include_this(excludes, publisher=fmri.publisher):
                                errors, warnings, info = act.verify(
                                    self, pfmri=fmri, **kwargs)
                        elif act.include_this(vardrate_excludes,
                            publisher=fmri.publisher) and not act.refcountable:
                                # Verify that file that is faceted out does not
                                # exist. Exclude actions which may be delivered
                                # from multiple packages.
                                path = act.attrs.get("path", None)
                                if path is not None and os.path.exists(
                                    os.path.join(self.root, path)):
                                        errors.append(
                                            _("File should not exist"))
                        else:
                                # Action that is not applicable to image variant
                                # or has been dehydrated.
                                continue

                        if errors or warnings or info:
                                yield act, errors, warnings, info
2305 def image_config_update(self, new_variants, new_facets, new_mediators):
2306 """update variants in image config"""
2308 if new_variants is not None:
2309 self.cfg.variants.update(new_variants)
2310 if new_facets is not None:
2311 self.cfg.facets = new_facets
2312 if new_mediators is not None:
2313 self.cfg.mediators = new_mediators
2314 self.save_config()
2316 def __verify_manifest(self, fmri, mfstpath, alt_pub=None):
2317 """Verify a manifest. The caller must supply the FMRI
2318 for the package in 'fmri', as well as the path to the
2319 manifest file that will be verified."""
2321 try:
2322 return self.transport._verify_manifest(fmri,
2323 mfstpath=mfstpath, pub=alt_pub)
2324 except InvalidContentException:
2325 return False
2327 def has_manifest(self, pfmri, alt_pub=None):
2328 """Check to see if the manifest for pfmri is present on disk and
2329 has the correct hash."""
2331 pth = self.get_manifest_path(pfmri)
2332 on_disk = os.path.exists(pth)
2334 if not on_disk or \
2335 self.is_pkg_installed(pfmri) or \
2336 self.__verify_manifest(fmri=pfmri, mfstpath=pth, alt_pub=alt_pub):
2337 return on_disk
2338 return False
2340 def get_license_dir(self, pfmri):
2341 """Return path to package license directory."""
2342 if self.version == self.CURRENT_VERSION:
2343 # Newer image format stores license files per-stem,
2344 # instead of per-stem and version, so that transitions
2345 # between package versions don't require redelivery
2346 # of license files.
2347 return os.path.join(self.imgdir, "license",
2348 pfmri.get_dir_path(stemonly=True))
2349 # Older image formats store license files in the manifest cache
2350 # directory.
2351 return self.get_manifest_dir(pfmri)
        def __get_installed_pkg_publisher(self, pfmri):
                """Returns the publisher for the FMRI of an installed package
                or None if the package is not installed."""
                # Linear scan of installed packages; first stem match wins.
                for f in self.gen_installed_pkgs():
                        if f.pkg_name == pfmri.pkg_name:
                                return f.publisher
                return None
2362 def get_manifest_dir(self, pfmri):
2363 """Return path to on-disk manifest cache directory."""
2364 if not pfmri.publisher:
2365 # Needed for consumers such as search that don't provide
2366 # publisher information.
2367 pfmri = pfmri.copy()
2368 pfmri.publisher = self.__get_installed_pkg_publisher(
2369 pfmri)
2370 assert pfmri.publisher
2371 if self.version == self.CURRENT_VERSION:
2372 root = self._get_publisher_cache_root(pfmri.publisher)
2373 else:
2374 root = self.imgdir
2375 return os.path.join(root, "pkg", pfmri.get_dir_path())
2377 def get_manifest_path(self, pfmri):
2378 """Return path to on-disk manifest file."""
2379 if not pfmri.publisher:
2380 # Needed for consumers such as search that don't provide
2381 # publisher information.
2382 pfmri = pfmri.copy()
2383 pfmri.publisher = self.__get_installed_pkg_publisher(
2384 pfmri)
2385 assert pfmri.publisher
2386 if self.version == self.CURRENT_VERSION:
2387 root = os.path.join(self._get_publisher_meta_root(
2388 pfmri.publisher))
2389 return os.path.join(root, "pkg", pfmri.get_dir_path())
2390 return os.path.join(self.get_manifest_dir(pfmri),
2391 "manifest")
        def __get_manifest(self, fmri, excludes=EmptyI, intent=None,
            alt_pub=None):
                """Find on-disk manifest and create in-memory Manifest
                object.... grab from server if needed.

                'excludes' filters the actions included in the returned
                manifest; 'intent' is an optional string reported to the
                depot when a cached manifest is used; 'alt_pub' is an
                optional alternate publisher to retrieve through."""

                try:
                        # KeyError doubles as the cache-miss signal: raised
                        # explicitly when the cached manifest is absent or
                        # fails verification.
                        if not self.has_manifest(fmri, alt_pub=alt_pub):
                                raise KeyError
                        ret = manifest.FactoredManifest(fmri,
                            self.get_manifest_dir(fmri),
                            excludes=excludes,
                            pathname=self.get_manifest_path(fmri))

                        # if we have a intent string, let depot
                        # know for what we're using the cached manifest
                        if intent:
                                alt_repo = None
                                if alt_pub:
                                        alt_repo = alt_pub.repository
                                try:
                                        self.transport.touch_manifest(fmri,
                                            intent, alt_repo=alt_repo)
                                except (apx.UnknownPublisher,
                                    apx.TransportError):
                                        # It's not fatal if we can't find
                                        # or reach the publisher.
                                        pass
                except KeyError:
                        # Cache miss: retrieve the manifest via the transport.
                        ret = self.transport.get_manifest(fmri, excludes,
                            intent, pub=alt_pub)
                return ret
2425 def get_manifest(self, fmri, ignore_excludes=False, intent=None,
2426 alt_pub=None):
2427 """return manifest; uses cached version if available.
2428 ignore_excludes controls whether manifest contains actions
2429 for all variants
2431 If 'ignore_excludes' is set to True, then all actions in the
2432 manifest are included, regardless of variant or facet tags. If
2433 set to False, then the variants and facets currently set in the
2434 image will be applied, potentially filtering out some of the
2435 actions."""
2437 # Normally elide other arch variants, facets
2439 if ignore_excludes:
2440 excludes = EmptyI
2441 else:
2442 excludes = [self.cfg.variants.allow_action,
2443 self.cfg.facets.allow_action]
2445 try:
2446 m = self.__get_manifest(fmri, excludes=excludes,
2447 intent=intent, alt_pub=alt_pub)
2448 except apx.ActionExecutionError as e:
2449 raise
2450 except pkg.actions.ActionError as e:
2451 raise apx.InvalidPackageErrors([e])
2453 return m
        def update_pkg_installed_state(self, pkg_pairs, progtrack):
                """Sets the recorded installed state of each package pair in
                'pkg_pairs'. 'pkg_pair' should be an iterable of tuples of
                the format (added, removed) where 'removed' is the FMRI of the
                package that was uninstalled, and 'added' is the package
                installed for the operation. These pairs are representative of
                the destination and origin package for each part of the
                operation."""

                if self.version < self.CURRENT_VERSION:
                        raise apx.ImageFormatUpdateNeeded(self.root)

                kcat = self.get_catalog(self.IMG_CATALOG_KNOWN)
                icat = self.get_catalog(self.IMG_CATALOG_INSTALLED)

                # Partition the pairs into added/removed sets; 'updated'
                # remembers the old entry metadata for in-place updates so
                # the original install time can be preserved below.
                added = set()
                removed = set()
                updated = {}
                for add_pkg, rem_pkg in pkg_pairs:
                        if add_pkg == rem_pkg:
                                continue
                        if add_pkg:
                                added.add(add_pkg)
                        if rem_pkg:
                                removed.add(rem_pkg)
                        if add_pkg and rem_pkg:
                                updated[add_pkg] = \
                                    dict(kcat.get_entry(rem_pkg).get(
                                    "metadata", {}))

                combo = added.union(removed)

                progtrack.job_start(progtrack.JOB_STATE_DB)
                # 'Updating package state database'
                for pfmri in combo:
                        progtrack.job_add_progress(progtrack.JOB_STATE_DB)
                        entry = kcat.get_entry(pfmri)
                        mdata = entry.get("metadata", {})
                        states = set(mdata.get("states", set()))
                        if pfmri in removed:
                                icat.remove_package(pfmri)
                                states.discard(pkgdefs.PKG_STATE_INSTALLED)
                                mdata.pop("last-install", None)
                                mdata.pop("last-update", None)

                        if pfmri in added:
                                states.add(pkgdefs.PKG_STATE_INSTALLED)
                                cur_time = pkg.catalog.now_to_basic_ts()
                                if pfmri in updated:
                                        # Preserve the original install time
                                        # across an update; only stamp a new
                                        # last-update time.
                                        last_install = updated[pfmri].get(
                                            "last-install")
                                        if last_install:
                                                mdata["last-install"] = \
                                                    last_install
                                                mdata["last-update"] = \
                                                    cur_time
                                        else:
                                                mdata["last-install"] = \
                                                    cur_time
                                else:
                                        mdata["last-install"] = cur_time
                                if pkgdefs.PKG_STATE_ALT_SOURCE in states:
                                        # Alternate-source bookkeeping states
                                        # no longer apply once installed.
                                        states.discard(
                                            pkgdefs.PKG_STATE_UPGRADABLE)
                                        states.discard(
                                            pkgdefs.PKG_STATE_ALT_SOURCE)
                                        states.discard(
                                            pkgdefs.PKG_STATE_KNOWN)
                        elif pkgdefs.PKG_STATE_KNOWN not in states:
                                # This entry is no longer available and has no
                                # meaningful state information, so should be
                                # discarded.
                                kcat.remove_package(pfmri)
                                progtrack.job_add_progress(
                                    progtrack.JOB_STATE_DB)
                                continue

                        if (pkgdefs.PKG_STATE_INSTALLED in states and
                            pkgdefs.PKG_STATE_UNINSTALLED in states) or (
                            pkgdefs.PKG_STATE_KNOWN in states and
                            pkgdefs.PKG_STATE_UNKNOWN in states):
                                raise apx.ImagePkgStateError(pfmri,
                                    states)

                        # Catalog format only supports lists.
                        mdata["states"] = list(states)

                        # Now record the package state.
                        kcat.update_entry(mdata, pfmri=pfmri)

                        # If the package is being marked as installed,
                        # then it shouldn't already exist in the
                        # installed catalog and should be added.
                        if pfmri in added:
                                icat.append(kcat, pfmri=pfmri)

                        entry = mdata = states = None
                        progtrack.job_add_progress(progtrack.JOB_STATE_DB)
                progtrack.job_done(progtrack.JOB_STATE_DB)

                # Discard entries for alternate source packages that weren't
                # installed as part of the operation.
                if self.__alt_pkg_pub_map:
                        for pfmri in self.__alt_known_cat.fmris():
                                if pfmri in added:
                                        # Nothing to do.
                                        continue

                                entry = kcat.get_entry(pfmri)
                                if not entry:
                                        # The only reason that the entry should
                                        # not exist in the 'known' part is
                                        # because it was removed during the
                                        # operation.
                                        assert pfmri in removed
                                        continue

                                states = entry.get("metadata", {}).get("states",
                                    EmptyI)
                                if pkgdefs.PKG_STATE_ALT_SOURCE in states:
                                        kcat.remove_package(pfmri)

                        # Now add the publishers of packages that were installed
                        # from temporary sources that did not previously exist
                        # to the image's configuration. (But without any
                        # origins, sticky, and enabled.)
                        cfgpubs = set(self.cfg.publishers.keys())
                        instpubs = set(f.publisher for f in added)
                        altpubs = self.__alt_known_cat.publishers()

                        # List of publishers that need to be added is the
                        # intersection of installed and alternate minus
                        # the already configured.
                        newpubs = (instpubs & altpubs) - cfgpubs
                        for pfx in newpubs:
                                npub = publisher.Publisher(pfx,
                                    repository=publisher.Repository())
                                self.__add_publisher(npub,
                                    refresh_allowed=False)

                        # Ensure image configuration reflects new information.
                        self.__cleanup_alt_pkg_certs()
                        self.save_config()

                # Remove manifests of packages that were removed from the
                # system. Some packages may have only had facets or
                # variants changed, so don't remove those.

                # 'Updating package cache'
                progtrack.job_start(progtrack.JOB_PKG_CACHE, goal=len(removed))
                for pfmri in removed:
                        mcdir = self.get_manifest_dir(pfmri)
                        manifest.FactoredManifest.clear_cache(mcdir)

                        # Remove package cache directory if possible; we don't
                        # care if it fails.
                        try:
                                os.rmdir(os.path.dirname(mcdir))
                        except:
                                pass

                        mpath = self.get_manifest_path(pfmri)
                        try:
                                portable.remove(mpath)
                        except EnvironmentError as e:
                                if e.errno != errno.ENOENT:
                                        raise apx._convert_error(e)

                        # Remove package manifest directory if possible; we
                        # don't care if it fails.
                        try:
                                os.rmdir(os.path.dirname(mpath))
                        except:
                                pass
                        progtrack.job_add_progress(progtrack.JOB_PKG_CACHE)
                progtrack.job_done(progtrack.JOB_PKG_CACHE)

                progtrack.job_start(progtrack.JOB_IMAGE_STATE)

                # Temporarily redirect the catalogs to a different location,
                # so that if the save is interrupted, the image won't be left
                # with invalid state, and then save them.
                tmp_state_root = self.temporary_dir()

                try:
                        for cat, name in ((kcat, self.IMG_CATALOG_KNOWN),
                            (icat, self.IMG_CATALOG_INSTALLED)):
                                cpath = os.path.join(tmp_state_root, name)

                                # Must copy the old catalog data to the new
                                # destination as only changed files will be
                                # written.
                                progtrack.job_add_progress(
                                    progtrack.JOB_IMAGE_STATE)
                                misc.copytree(cat.meta_root, cpath)
                                progtrack.job_add_progress(
                                    progtrack.JOB_IMAGE_STATE)
                                cat.meta_root = cpath
                                cat.finalize(pfmris=added)
                                progtrack.job_add_progress(
                                    progtrack.JOB_IMAGE_STATE)
                                cat.save()
                                progtrack.job_add_progress(
                                    progtrack.JOB_IMAGE_STATE)

                        del cat, name
                        self.__init_catalogs()
                        progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE)

                        # copy any other state files from current state
                        # dir into new state dir.
                        for p in os.listdir(self._statedir):
                                progtrack.job_add_progress(
                                    progtrack.JOB_IMAGE_STATE)
                                fp = os.path.join(self._statedir, p)
                                if os.path.isfile(fp):
                                        portable.copyfile(fp,
                                            os.path.join(tmp_state_root, p))

                        # Next, preserve the old installed state dir, rename the
                        # new one into place, and then remove the old one.
                        orig_state_root = self.salvage(self._statedir,
                            full_path=True)
                        portable.rename(tmp_state_root, self._statedir)

                        progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE)
                        shutil.rmtree(orig_state_root, True)

                        progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE)
                except EnvironmentError as e:
                        # shutil.Error can contains a tuple of lists of errors.
                        # Some of the error entries may be a tuple others will
                        # be a string due to poor error handling in shutil.
                        if isinstance(e, shutil.Error) and \
                            type(e.args[0]) == list:
                                msg = ""
                                for elist in e.args:
                                        for entry in elist:
                                                if type(entry) == tuple:
                                                        msg += "{0}\n".format(
                                                            entry[-1])
                                                else:
                                                        msg += "{0}\n".format(
                                                            entry)
                                raise apx.UnknownErrors(msg)
                        raise apx._convert_error(e)
                finally:
                        # Regardless of success, the following must happen.
                        self.__init_catalogs()
                        if os.path.exists(tmp_state_root):
                                shutil.rmtree(tmp_state_root, True)

                progtrack.job_done(progtrack.JOB_IMAGE_STATE)
2709 def get_catalog(self, name):
2710 """Returns the requested image catalog.
2712 'name' must be one of the following image constants:
2713 IMG_CATALOG_KNOWN
2714 The known catalog contains all of packages that are
2715 installed or available from a publisher's repository.
2717 IMG_CATALOG_INSTALLED
2718 The installed catalog is a subset of the 'known'
2719 catalog that only contains installed packages."""
2721 if not self.imgdir:
2722 raise RuntimeError("self.imgdir must be set")
2724 cat = self.__catalogs.get(name)
2725 if not cat:
2726 cat = self.__get_catalog(name)
2727 self.__catalogs[name] = cat
2729 if name == self.IMG_CATALOG_KNOWN:
2730 # Apply alternate package source data every time that
2731 # the known catalog is requested.
2732 self.__apply_alt_pkg_sources(cat)
2734 return cat
2736 def _manifest_cb(self, cat, f):
2737 # Only allow lazy-load for packages from non-v1 sources.
2738 # Assume entries for other sources have all data
2739 # required in catalog. This prevents manifest retrieval
2740 # for packages that don't have any related action data
2741 # in the catalog because they don't have any related
2742 # action data in their manifest.
2743 entry = cat.get_entry(f)
2744 states = entry["metadata"]["states"]
2745 if pkgdefs.PKG_STATE_V1 not in states:
2746 return self.get_manifest(f, ignore_excludes=True)
2747 return
        def __get_catalog(self, name):
                """Private method to retrieve catalog; this bypasses the
                normal automatic caching (unless the image hasn't been
                upgraded yet)."""

                if self.__upgraded and self.version < 3:
                        # Assume the catalog is already cached in this case
                        # and can't be reloaded from disk as it doesn't exist
                        # on disk yet.
                        return self.__catalogs[name]

                croot = os.path.join(self._statedir, name)
                try:
                        os.makedirs(croot)
                except EnvironmentError as e:
                        if e.errno in (errno.EACCES, errno.EROFS):
                                # Allow operations to work for
                                # unprivileged users; a None meta_root keeps
                                # the catalog purely in-memory.
                                croot = None
                        elif e.errno != errno.EEXIST:
                                raise

                # batch_mode is set to True here as any operations that modify
                # the catalogs (add or remove entries) are only done during an
                # image upgrade or metadata refresh. In both cases, the catalog
                # is resorted and finalized so this is always safe to use.
                cat = pkg.catalog.Catalog(batch_mode=True,
                    manifest_cb=self._manifest_cb, meta_root=croot, sign=False)
                return cat
2779 def __remove_catalogs(self):
2780 """Removes all image catalogs and their directories."""
2782 self.__init_catalogs()
2783 for name in (self.IMG_CATALOG_KNOWN,
2784 self.IMG_CATALOG_INSTALLED):
2785 shutil.rmtree(os.path.join(self._statedir, name))
2787 def get_version_installed(self, pfmri):
2788 """Returns an fmri of the installed package matching the
2789 package stem of the given fmri or None if no match is found."""
2791 cat = self.get_catalog(self.IMG_CATALOG_INSTALLED)
2792 for ver, fmris in cat.fmris_by_version(pfmri.pkg_name):
2793 return fmris[0]
2794 return None
2796 def get_pkg_repo(self, pfmri):
2797 """Returns the repository object containing the origins that
2798 should be used to retrieve the specified package or None if
2799 it can be retrieved from all sources or is not a known package.
2802 assert pfmri.publisher
2803 cat = self.get_catalog(self.IMG_CATALOG_KNOWN)
2804 entry = cat.get_entry(pfmri)
2805 if entry is None:
2806 # Package not known.
2807 return
2809 try:
2810 slist = entry["metadata"]["sources"]
2811 except KeyError:
2812 # Can be retrieved from any source.
2813 return
2814 else:
2815 if not slist:
2816 # Can be retrieved from any source.
2817 return
2819 pub = self.get_publisher(prefix=pfmri.publisher)
2820 repo = copy.copy(pub.repository)
2821 norigins = [
2822 o for o in repo.origins
2823 if o.uri in slist
2826 if not norigins:
2827 # Known sources don't match configured; return so that
2828 # caller can fallback to default behaviour.
2829 return
2831 repo.origins = norigins
2832 return repo
2834 def get_pkg_state(self, pfmri):
2835 """Returns the list of states a package is in for this image."""
2837 cat = self.get_catalog(self.IMG_CATALOG_KNOWN)
2838 entry = cat.get_entry(pfmri)
2839 if entry is None:
2840 return []
2841 return entry["metadata"]["states"]
2843 def is_pkg_installed(self, pfmri):
2844 """Returns a boolean value indicating whether the specified
2845 package is installed."""
2847 # Avoid loading the installed catalog if the known catalog
2848 # is already loaded. This is safe since the installed
2849 # catalog is a subset of the known, and a specific entry
2850 # is being retrieved.
2851 if not self.__catalog_loaded(self.IMG_CATALOG_KNOWN):
2852 cat = self.get_catalog(self.IMG_CATALOG_INSTALLED)
2853 else:
2854 cat = self.get_catalog(self.IMG_CATALOG_KNOWN)
2856 entry = cat.get_entry(pfmri)
2857 if entry is None:
2858 return False
2859 states = entry["metadata"]["states"]
2860 return pkgdefs.PKG_STATE_INSTALLED in states
2862 def list_excludes(self, new_variants=None, new_facets=None):
2863 """Generate a list of callables that each return True if an
2864 action is to be included in the image using the currently
2865 defined variants & facets for the image, or an updated set if
2866 new_variants or new_facets are specified."""
2868 if new_variants:
2869 new_vars = self.cfg.variants.copy()
2870 new_vars.update(new_variants)
2871 var_call = new_vars.allow_action
2872 else:
2873 var_call = self.cfg.variants.allow_action
2874 if new_facets is not None:
2875 fac_call = new_facets.allow_action
2876 else:
2877 fac_call = self.cfg.facets.allow_action
2879 return [var_call, fac_call]
2881 def get_variants(self):
2882 """ return a copy of the current image variants"""
2883 return self.cfg.variants.copy()
2885 def get_facets(self):
2886 """ Return a copy of the current image facets"""
2887 return self.cfg.facets.copy()
        def __state_updating_pathname(self):
                """Return the path to a flag file indicating that the image
                catalog is being updated."""
                # The flag file lives directly in the image state directory.
                return os.path.join(self._statedir, self.__STATE_UPDATING_FILE)
2894 def __start_state_update(self):
2895 """Called when we start updating the image catalog. Normally
2896 returns False, but will return True if a previous update was
2897 interrupted."""
2899 # get the path to the image catalog update flag file
2900 pathname = self.__state_updating_pathname()
2902 # if the flag file exists a previous update was interrupted so
2903 # return 1
2904 if os.path.exists(pathname):
2905 return True
2907 # create the flag file and return 0
2908 file_mode = misc.PKG_FILE_MODE
2909 try:
2910 open(pathname, "w")
2911 os.chmod(pathname, file_mode)
2912 except EnvironmentError as e:
2913 if e.errno == errno.EACCES:
2914 raise apx.PermissionsException(e.filename)
2915 if e.errno == errno.EROFS:
2916 raise apx.ReadOnlyFileSystemException(
2917 e.filename)
2918 raise
2919 return False
2921 def __end_state_update(self):
2922 """Called when we're done updating the image catalog."""
2924 # get the path to the image catalog update flag file
2925 pathname = self.__state_updating_pathname()
2927 # delete the flag file.
2928 try:
2929 portable.remove(pathname)
2930 except EnvironmentError as e:
2931 if e.errno == errno.EACCES:
2932 raise apx.PermissionsException(e.filename)
2933 if e.errno == errno.EROFS:
2934 raise apx.ReadOnlyFileSystemException(
2935 e.filename)
2936 raise
def __rebuild_image_catalogs(self, progtrack=None):
    """Rebuilds the image catalogs based on the available publisher
    catalogs.

    'progtrack' is an optional ProgressTracker; a NullProgressTracker
    is used when none is supplied.

    Raises ImageFormatUpdateNeeded for pre-version-3 images.  The new
    catalogs are built in a temporary directory and only renamed into
    place at the very end, so an interruption leaves the image in its
    previous, consistent state."""

    if self.version < 3:
        raise apx.ImageFormatUpdateNeeded(self.root)

    if not progtrack:
        progtrack = progress.NullProgressTracker()

    progtrack.cache_catalogs_start()

    publist = list(self.gen_publishers())

    be_name, be_uuid = bootenv.BootEnv.get_be_name(self.root)
    self.history.log_operation_start("rebuild-image-catalogs",
        be_name=be_name, be_uuid=be_uuid)

    # Mark all operations as occurring at this time.
    op_time = datetime.datetime.utcnow()

    # The image catalogs need to be updated, but this is a bit
    # tricky as previously known packages must remain known even
    # if PKG_STATE_KNOWN is no longer true if any other state
    # information is present.  This is to allow freezing, etc. of
    # package states on a permanent basis even if the package is
    # no longer available from a publisher repository.  However,
    # this is only True of installed packages.
    old_icat = self.get_catalog(self.IMG_CATALOG_INSTALLED)

    # batch_mode is set to True here since without it, catalog
    # population time is almost doubled (since the catalog is
    # re-sorted and stats are generated for every operation).
    # In addition, the new catalog is first created in a new
    # temporary directory so that it can be moved into place
    # at the very end of this process (to minimize the chance
    # that failure or interruption will cause the image to be
    # left in an inconsistent state).
    tmp_state_root = self.temporary_dir()

    # Copy any regular files placed in the state directory
    for p in os.listdir(self._statedir):
        if p == self.__STATE_UPDATING_FILE:
            # don't copy the state updating file
            continue
        fp = os.path.join(self._statedir, p)
        if os.path.isfile(fp):
            portable.copyfile(fp, os.path.join(tmp_state_root, p))

    kcat = pkg.catalog.Catalog(batch_mode=True,
        meta_root=os.path.join(tmp_state_root,
        self.IMG_CATALOG_KNOWN), sign=False)

    # XXX if any of the below fails for any reason, the old 'known'
    # catalog needs to be re-loaded so the client is in a consistent
    # state.

    # All enabled publisher catalogs must be processed.
    pub_cats = [(pub.prefix, pub.catalog) for pub in publist]

    # XXX For backwards compatibility, 'upgradability' of packages
    # is calculated and stored based on whether a given pkg stem
    # matches the newest version in the catalog.  This is quite
    # expensive (due to overhead), but at least the cost is
    # consolidated here.  This comparison is also cross-publisher,
    # as it used to be.  In the future, it could likely be improved
    # by usage of the SAT solver.
    newest = {}
    for pfx, cat in [(None, old_icat)] + pub_cats:
        for f in cat.fmris(last=True,
            pubs=pfx and [pfx] or EmptyI):
            nver, snver = newest.get(f.pkg_name, (None,
                None))
            # Python 2 permits ordering comparison with None,
            # so the first version seen always wins here.
            if f.version > nver:
                newest[f.pkg_name] = (f.version,
                    str(f.version))

    # Next, copy all of the entries for the catalog parts that
    # currently exist into the image 'known' catalog.

    # Iterator for source parts.
    sparts = (
        (pfx, cat, name, cat.get_part(name, must_exist=True))
        for pfx, cat in pub_cats
        for name in cat.parts
    )

    # Build list of installed packages based on actual state
    # information just in case there is a state issue from an
    # older client.
    inst_stems = {}
    for t, entry in old_icat.tuple_entries():
        states = entry["metadata"]["states"]
        if pkgdefs.PKG_STATE_INSTALLED not in states:
            continue
        pub, stem, ver = t
        # False marks an entry not yet seen in a publisher
        # catalog; flipped to True below when it is.
        inst_stems.setdefault(pub, {})
        inst_stems[pub].setdefault(stem, {})
        inst_stems[pub][stem][ver] = False

    # Create the new installed catalog in a temporary location.
    icat = pkg.catalog.Catalog(batch_mode=True,
        meta_root=os.path.join(tmp_state_root,
        self.IMG_CATALOG_INSTALLED), sign=False)

    excludes = self.list_excludes()

    frozen_pkgs = dict([
        (p[0].pkg_name, p[0]) for p in self.get_frozen_list()
    ])

    for pfx, cat, name, spart in sparts:
        # 'spart' is the source part.
        if spart is None:
            # Client hasn't retrieved this part.
            continue

        # New known part.
        nkpart = kcat.get_part(name)
        nipart = icat.get_part(name)
        base = name.startswith("catalog.base.")

        # Avoid accessor overhead since these will be
        # used for every entry.
        cat_ver = cat.version
        dp = cat.get_part("catalog.dependency.C",
            must_exist=True)

        for t, sentry in spart.tuple_entries(pubs=[pfx]):
            pub, stem, ver = t

            installed = False
            if pub in inst_stems and \
                stem in inst_stems[pub] and \
                ver in inst_stems[pub][stem]:
                installed = True
                inst_stems[pub][stem][ver] = True

            # copy() is too slow here and catalog entries
            # are shallow so this should be sufficient.
            entry = dict(sentry.iteritems())
            if not base:
                # Nothing else to do except add the
                # entry for non-base catalog parts.
                nkpart.add(metadata=entry,
                    op_time=op_time, pub=pub, stem=stem,
                    ver=ver)
                if installed:
                    nipart.add(metadata=entry,
                        op_time=op_time, pub=pub,
                        stem=stem, ver=ver)
                continue

            # Only the base catalog part stores package
            # state information and/or other metadata.
            mdata = entry.setdefault("metadata", {})
            states = mdata.setdefault("states", [])
            states.append(pkgdefs.PKG_STATE_KNOWN)

            if cat_ver == 0:
                states.append(pkgdefs.PKG_STATE_V0)
            elif pkgdefs.PKG_STATE_V0 not in states:
                # Assume V1 catalog source.
                states.append(pkgdefs.PKG_STATE_V1)

            if installed:
                states.append(
                    pkgdefs.PKG_STATE_INSTALLED)

            nver, snver = newest.get(stem, (None, None))
            if snver is not None and ver != snver:
                states.append(
                    pkgdefs.PKG_STATE_UPGRADABLE)

            # Check if the package is frozen.
            if stem in frozen_pkgs:
                f_ver = frozen_pkgs[stem].version
                if f_ver == ver or \
                    pkg.version.Version(ver
                    ).is_successor(f_ver,
                    constraint=
                    pkg.version.CONSTRAINT_AUTO):
                    states.append(
                        pkgdefs.PKG_STATE_FROZEN)

            # Determine if package is obsolete or has been
            # renamed and mark with appropriate state.
            dpent = None
            if dp is not None:
                dpent = dp.get_entry(pub=pub, stem=stem,
                    ver=ver)
            if dpent is not None:
                for a in dpent["actions"]:
                    # Constructing action objects
                    # for every action would be a
                    # lot slower, so a simple string
                    # match is done first so that
                    # only interesting actions get
                    # constructed.
                    if not a.startswith("set"):
                        continue
                    if not ("pkg.obsolete" in a or \
                        "pkg.renamed" in a):
                        continue

                    try:
                        act = pkg.actions.fromstr(a)
                    except pkg.actions.ActionError:
                        # If the action can't be
                        # parsed or is not yet
                        # supported, continue.
                        continue

                    if act.attrs["value"].lower() != "true":
                        continue

                    if act.attrs["name"] == "pkg.obsolete":
                        states.append(
                            pkgdefs.PKG_STATE_OBSOLETE)
                    elif act.attrs["name"] == "pkg.renamed":
                        if not act.include_this(
                            excludes, publisher=pub):
                            continue
                        states.append(
                            pkgdefs.PKG_STATE_RENAMED)

            mdata["states"] = states

            # Add base entries.
            nkpart.add(metadata=entry, op_time=op_time,
                pub=pub, stem=stem, ver=ver)
            if installed:
                nipart.add(metadata=entry,
                    op_time=op_time, pub=pub, stem=stem,
                    ver=ver)

    # Now add installed packages to list of known packages using
    # previous state information.  While doing so, track any
    # new entries as the versions for the stem of the entry will
    # need to be passed to finalize() for sorting.
    final_fmris = []
    for name in old_icat.parts:
        # Old installed part.
        ipart = old_icat.get_part(name, must_exist=True)

        # New known part.
        nkpart = kcat.get_part(name)

        # New installed part.
        nipart = icat.get_part(name)

        base = name.startswith("catalog.base.")

        mdata = None
        for t, entry in ipart.tuple_entries():
            pub, stem, ver = t

            if pub not in inst_stems or \
                stem not in inst_stems[pub] or \
                ver not in inst_stems[pub][stem] or \
                inst_stems[pub][stem][ver]:
                # Entry is no longer valid or is already
                # known.
                continue

            if base:
                mdata = entry["metadata"]
                states = set(mdata["states"])
                states.discard(pkgdefs.PKG_STATE_KNOWN)

                nver, snver = newest.get(stem, (None,
                    None))
                if not nver or \
                    (snver is not None and ver == snver):
                    states.discard(
                        pkgdefs.PKG_STATE_UPGRADABLE)
                elif snver is not None:
                    states.add(
                        pkgdefs.PKG_STATE_UPGRADABLE)
                mdata["states"] = list(states)

            # Add entries.
            nkpart.add(metadata=entry, op_time=op_time,
                pub=pub, stem=stem, ver=ver)
            nipart.add(metadata=entry, op_time=op_time,
                pub=pub, stem=stem, ver=ver)
            final_fmris.append(pkg.fmri.PkgFmri(name=stem,
                publisher=pub, version=ver))

    # Save the new catalogs.
    for cat in kcat, icat:
        misc.makedirs(cat.meta_root)
        cat.finalize(pfmris=final_fmris)
        cat.save()

    # Next, preserve the old installed state dir, rename the
    # new one into place, and then remove the old one.
    orig_state_root = self.salvage(self._statedir, full_path=True)
    portable.rename(tmp_state_root, self._statedir)
    shutil.rmtree(orig_state_root, True)

    # Ensure in-memory catalogs get reloaded.
    self.__init_catalogs()

    progtrack.cache_catalogs_done()
    self.history.log_operation_end()
def refresh_publishers(self, full_refresh=False, immediate=False,
    pubs=None, progtrack=None):
    """Refreshes the metadata (e.g. catalog) for one or more
    publishers.  Callers are responsible for locking the image.

    'full_refresh' is an optional boolean value indicating whether
    a full retrieval of publisher metadata (e.g. catalogs) or only
    an update to the existing metadata should be performed.  When
    True, 'immediate' is also set to True.

    'immediate' is an optional boolean value indicating whether the
    a refresh should occur now.  If False, a publisher's selected
    repository will only be checked for updates if the update
    interval period recorded in the image configuration has been
    exceeded.

    'pubs' is a list of publisher prefixes or publisher objects
    to refresh.  Passing an empty list or using the default value
    implies all publishers.

    Raises ImageFormatUpdateNeeded for pre-version-3 images,
    DisabledPublisher if an explicitly requested publisher is
    disabled, and CatalogRefreshException if any refresh failed."""

    if self.version < 3:
        raise apx.ImageFormatUpdateNeeded(self.root)

    if not progtrack:
        progtrack = progress.NullProgressTracker()

    be_name, be_uuid = bootenv.BootEnv.get_be_name(self.root)
    self.history.log_operation_start("refresh-publishers",
        be_name=be_name, be_uuid=be_uuid)

    pubs_to_refresh = []

    if not pubs:
        # Omit disabled publishers.
        pubs = [p for p in self.gen_publishers()]

    if not pubs:
        # No enabled publishers at all; just rebuild the image
        # catalogs from what is already known locally.
        self.__rebuild_image_catalogs(progtrack=progtrack)
        return

    for pub in pubs:
        p = pub
        # 'pubs' may mix prefixes and Publisher objects;
        # normalize to Publisher objects.
        if not isinstance(p, publisher.Publisher):
            p = self.get_publisher(prefix=p)
        if p.disabled:
            e = apx.DisabledPublisher(p)
            self.history.log_operation_end(error=e)
            raise e
        pubs_to_refresh.append(p)

    if not pubs_to_refresh:
        self.history.log_operation_end(
            result=history.RESULT_NOTHING_TO_DO)
        return

    # Verify validity of certificates before attempting network
    # operations.
    try:
        self.check_cert_validity(pubs=pubs_to_refresh)
    except apx.ExpiringCertificate as e:
        # Expiring (not yet expired) certificates only warn;
        # the refresh proceeds.
        logger.error(str(e))

    try:
        # Ensure Image directory structure is valid.
        self.mkdirs()
    except Exception as e:
        self.history.log_operation_end(error=e)
        raise

    progtrack.refresh_start(len(pubs_to_refresh),
        full_refresh=full_refresh)

    failed = []
    total = 0
    succeeded = set()
    updated = self.__start_state_update()
    for pub in pubs_to_refresh:
        total += 1
        progtrack.refresh_start_pub(pub)
        try:
            if pub.refresh(full_refresh=full_refresh,
                immediate=immediate, progtrack=progtrack):
                updated = True
        except apx.PermissionsException as e:
            failed.append((pub, e))
            # No point in continuing since no data can
            # be written.
            break
        except apx.ApiException as e:
            failed.append((pub, e))
            continue
        finally:
            progtrack.refresh_end_pub(pub)
        succeeded.add(pub.prefix)

    progtrack.refresh_done()

    if updated:
        self.__rebuild_image_catalogs(progtrack=progtrack)
    else:
        # Nothing changed; just clear the state-updating flag.
        self.__end_state_update()

    if failed:
        e = apx.CatalogRefreshException(failed, total,
            len(succeeded))
        self.history.log_operation_end(error=e)
        raise e

    if not updated:
        self.history.log_operation_end(
            result=history.RESULT_NOTHING_TO_DO)
        return
    self.history.log_operation_end()
def _get_publisher_meta_dir(self):
    """Return the image-relative directory name under which publisher
    metadata is stored for this image format version."""
    # Pre-version-3 images kept everything under "catalog".
    if self.version < 3:
        return "catalog"
    return IMG_PUB_DIR
3363 def _get_publisher_cache_root(self, prefix):
3364 return os.path.join(self.imgdir, "cache", "publisher", prefix)
3366 def _get_publisher_meta_root(self, prefix):
3367 return os.path.join(self.imgdir, self._get_publisher_meta_dir(),
3368 prefix)
def remove_publisher_metadata(self, pub, progtrack=None, rebuild=True):
    """Removes the metadata for the specified publisher object,
    except data for installed packages.

    'pub' is the object of the publisher to remove the data for.

    'progtrack' is an optional ProgressTracker object.

    'rebuild' is an optional boolean specifying whether image
    catalogs should be rebuilt after removing the publisher's
    metadata."""

    if self.version < 4:
        # Older images don't require fine-grained deletion.
        pub.remove_meta_root()
        if rebuild:
            self.__rebuild_image_catalogs(
                progtrack=progtrack)
        return

    # Build a list of paths that shouldn't be removed because they
    # belong to installed packages.
    excluded = [
        self.get_manifest_path(f)
        for f in self.gen_installed_pkgs()
        if f.publisher == pub.prefix
    ]

    if not excluded:
        pub.remove_meta_root()
    else:
        try:
            # Discard all publisher metadata except
            # package manifests as a first pass.
            for entry in os.listdir(pub.meta_root):
                if entry == "pkg":
                    continue

                target = os.path.join(pub.meta_root,
                    entry)
                if os.path.isdir(target):
                    shutil.rmtree(target,
                        ignore_errors=True)
                else:
                    portable.remove(target)

            # Build the list of directories that can't be
            # removed.
            exdirs = [os.path.dirname(e) for e in excluded]

            # Now try to discard only package manifests
            # that aren't for installed packages.
            mroot = os.path.join(pub.meta_root, "pkg")
            for pdir in os.listdir(mroot):
                proot = os.path.join(mroot, pdir)
                if proot not in exdirs:
                    # This removes all manifest data
                    # for a given package stem.
                    shutil.rmtree(proot,
                        ignore_errors=True)
                    continue

                # Remove only manifest data for packages
                # that are not installed.
                for mname in os.listdir(proot):
                    mpath = os.path.join(proot,
                        mname)
                    if mpath not in excluded:
                        portable.remove(mpath)

            # Finally, dump any cache data for this
            # publisher if possible.
            shutil.rmtree(self._get_publisher_cache_root(
                pub.prefix), ignore_errors=True)
        except EnvironmentError as e:
            # A missing path is acceptable; anything else
            # is surfaced as an API error.
            if e.errno != errno.ENOENT:
                raise apx._convert_error(e)

    if rebuild:
        self.__rebuild_image_catalogs(progtrack=progtrack)
def gen_installed_pkg_names(self, anarchy=True):
    """A generator function that produces FMRI strings as it
    iterates over the list of installed packages.  This is
    faster than gen_installed_pkgs when only the FMRI string
    is needed.

    'anarchy', when True, strips the publisher prefix from each
    yielded name."""

    icat = self.get_catalog(self.IMG_CATALOG_INSTALLED)
    for fstr in icat.fmris(objects=False):
        if not anarchy:
            yield fstr
            continue
        # Catalog entries always carry a "pkg://publisher/"
        # prefix; drop the publisher portion.
        yield "pkg:/{0}".format(fstr[6:].split("/", 1)[-1])
def gen_installed_pkgs(self, pubs=EmptyI, ordered=False):
    """Yield the fmri of each installed package, optionally limited to
    the publishers in 'pubs' and optionally in catalog order."""

    installed = self.get_catalog(self.IMG_CATALOG_INSTALLED)
    for pfmri in installed.fmris(pubs=pubs, ordered=ordered):
        yield pfmri
def count_installed_pkgs(self, pubs=EmptyI):
    """Return the number of installed packages, optionally limited to
    the publishers in 'pubs'."""
    cat = self.get_catalog(self.IMG_CATALOG_INSTALLED)
    assert cat.package_count == cat.package_version_count
    total = 0
    for _pub, pkg_count, _ignored in \
        cat.get_package_counts_by_pub(pubs=pubs):
        total += pkg_count
    return total
def gen_tracked_stems(self):
    """Yield (group pkg fmri, stem) for every group dependency
    declared by the currently installed packages."""
    cat = self.get_catalog(self.IMG_CATALOG_INSTALLED)
    excludes = self.list_excludes()

    for pfmri in cat.fmris():
        actions = cat.get_entry_actions(pfmri,
            [pkg.catalog.Catalog.DEPENDENCY], excludes=excludes)
        for act in actions:
            if act.name != "depend" or \
                act.attrs["type"] != "group":
                continue
            yield (pfmri,
                self.strtofmri(act.attrs["fmri"]).pkg_name)
def _create_fast_lookups(self, progtrack=None):
    """Create an on-disk database mapping action name and key
    attribute value to the action string comprising the unique
    attributes of the action, for all installed actions.  This is
    done with a file mapping the tuple to an offset into a second
    file, where those actions are kept.  Once the offsets are loaded
    into memory, it is simple to seek into the second file to the
    given offset and read until you hit an action that doesn't
    match.

    Returns (actdict, timestamp): 'actdict' maps
    (action name, key attribute value) to (offset, count), and
    'timestamp' is the value written into both the stripped and
    offsets files to keep them coordinated."""

    if not progtrack:
        progtrack = progress.NullProgressTracker()

    # Invalidate the in-memory cache; it is repopulated from the
    # data generated below.
    self.__actdict = None
    self.__actdict_timestamp = None
    stripped_path = os.path.join(self.__action_cache_dir,
        "actions.stripped")
    offsets_path = os.path.join(self.__action_cache_dir,
        "actions.offsets")
    conflicting_keys_path = os.path.join(self.__action_cache_dir,
        "keys.conflicting")

    excludes = self.list_excludes()
    heap = []

    # nsd is the "name-space dictionary."  It maps action name
    # spaces (see action.generic for more information) to
    # dictionaries which map keys to pairs which contain an action
    # with that key and the pfmri of the package which delivered the
    # action.
    nsd = {}

    from heapq import heappush, heappop

    progtrack.job_start(progtrack.JOB_FAST_LOOKUP)

    for pfmri in self.gen_installed_pkgs():
        progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP)
        m = self.get_manifest(pfmri, ignore_excludes=True)
        for act in m.gen_actions(excludes=excludes):
            if not act.globally_identical:
                continue
            act.strip()
            # The heap keeps entries sorted by
            # (action name, key value) so equal keys come
            # out adjacent below.
            heappush(heap, (act.name,
                act.attrs[act.key_attr], pfmri, act))
            nsd.setdefault(act.namespace_group, {})
            nsd[act.namespace_group].setdefault(
                act.attrs[act.key_attr], [])
            nsd[act.namespace_group][
                act.attrs[act.key_attr]].append((
                act, pfmri))

    progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP)

    # If we can't write the temporary files, then there's no point
    # in producing actdict because it depends on a synchronized
    # stripped actions file.
    try:
        actdict = {}
        sf, sp = self.temporary_file(close=False)
        of, op = self.temporary_file(close=False)
        bf, bp = self.temporary_file(close=False)

        sf = os.fdopen(sf, "wb")
        of = os.fdopen(of, "wb")
        bf = os.fdopen(bf, "wb")

        # We need to make sure the files are coordinated.
        timestamp = int(time.time())
        sf.write("VERSION 1\n{0}\n".format(timestamp))
        of.write("VERSION 2\n{0}\n".format(timestamp))
        # The conflicting keys file doesn't need a timestamp
        # because it's not coordinated with the stripped or
        # offsets files and the result of loading it isn't
        # reused by this class.
        bf.write("VERSION 1\n")

        last_name, last_key, last_offset = None, None, sf.tell()
        cnt = 0
        while heap:
            # This is a tight loop, so try to avoid burning
            # CPU calling into the progress tracker
            # excessively.
            if len(heap) % 100 == 0:
                progtrack.job_add_progress(
                    progtrack.JOB_FAST_LOOKUP)
            item = heappop(heap)
            fmri, act = item[2:]
            key = act.attrs[act.key_attr]
            if act.name != last_name or key != last_key:
                # A new (name, key) run begins; flush the
                # offsets record for the previous run.
                if last_name is None:
                    assert last_key is None
                    cnt += 1
                    last_name = act.name
                    last_key = key
                else:
                    assert cnt > 0
                    of.write("{0} {1} {2} {3}\n".format(
                        last_name, last_offset,
                        cnt, last_key))
                    actdict[(last_name, last_key)] = last_offset, cnt
                    last_name, last_key, last_offset = \
                        act.name, key, sf.tell()
                    cnt = 1
            else:
                cnt += 1
            sf.write("{0} {1}\n".format(fmri, act))
        if last_name is not None:
            # Flush the record for the final run.
            assert last_key is not None
            assert last_offset is not None
            assert cnt > 0
            of.write("{0} {1} {2} {3}\n".format(
                last_name, last_offset, cnt, last_key))
            actdict[(last_name, last_key)] = \
                last_offset, cnt

        progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP)

        bad_keys = imageplan.ImagePlan._check_actions(nsd)
        for k in sorted(bad_keys):
            bf.write("{0}\n".format(k))

        progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP)
        sf.close()
        of.close()
        bf.close()
        os.chmod(sp, misc.PKG_FILE_MODE)
        os.chmod(op, misc.PKG_FILE_MODE)
        os.chmod(bp, misc.PKG_FILE_MODE)
    except BaseException as e:
        # Best-effort cleanup of the temporary files before
        # propagating the original error.
        try:
            os.unlink(sp)
            os.unlink(op)
            os.unlink(bp)
        except:
            pass
        raise

    progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP)

    # Finally, rename the temporary files into their final place.
    # If we have any problems, do our best to remove them, and we'll
    # try to recreate them on the read-side.
    try:
        if not os.path.exists(self.__action_cache_dir):
            os.makedirs(self.__action_cache_dir)
        portable.rename(sp, stripped_path)
        portable.rename(op, offsets_path)
        portable.rename(bp, conflicting_keys_path)
    except EnvironmentError as e:
        if e.errno == errno.EACCES or e.errno == errno.EROFS:
            # Can't write the shared cache location; fall
            # back to a private temporary directory.
            self.__action_cache_dir = self.temporary_dir()
            stripped_path = os.path.join(
                self.__action_cache_dir, "actions.stripped")
            offsets_path = os.path.join(
                self.__action_cache_dir, "actions.offsets")
            conflicting_keys_path = os.path.join(
                self.__action_cache_dir, "keys.conflicting")
            portable.rename(sp, stripped_path)
            portable.rename(op, offsets_path)
            portable.rename(bp, conflicting_keys_path)
        else:
            # Python 2 three-argument raise: re-raise the
            # original exception (with its traceback) after
            # best-effort removal of partial files.
            exc_info = sys.exc_info()
            try:
                os.unlink(stripped_path)
                os.unlink(offsets_path)
                os.unlink(conflicting_keys_path)
            except:
                pass
            raise exc_info[0], exc_info[1], exc_info[2]

    progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP)
    progtrack.job_done(progtrack.JOB_FAST_LOOKUP)
    return actdict, timestamp
def _remove_fast_lookups(self):
    """Remove on-disk database created by _create_fast_lookups.
    Should be called before updating image state to prevent the
    client from seeing stale state if _create_fast_lookups is
    interrupted."""

    cache_files = ("actions.stripped", "actions.offsets",
        "keys.conflicting")
    for fname in cache_files:
        path = os.path.join(self.__action_cache_dir, fname)
        try:
            portable.remove(path)
        except EnvironmentError as e:
            # A missing file is fine; anything else becomes
            # an API error.
            if e.errno != errno.ENOENT:
                raise apx._convert_error(e)
def _load_actdict(self, progtrack):
    """Read the file of offsets created in _create_fast_lookups()
    and return the dictionary mapping action name and key value to
    offset.

    Rebuilds the on-disk database via _create_fast_lookups() when the
    offsets file is missing, its version is unrecognized, or its
    timestamp doesn't match the stripped actions file."""

    try:
        of = open(os.path.join(self.__action_cache_dir,
            "actions.offsets"), "rb")
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        # Offsets file doesn't exist yet; build it now.
        actdict, otimestamp = self._create_fast_lookups()
        assert actdict is not None
        self.__actdict = actdict
        self.__actdict_timestamp = otimestamp
        return actdict

    # Make sure the files are paired, and try to create them if not.
    oversion = of.readline().rstrip()
    otimestamp = of.readline().rstrip()

    # The original action.offsets file existed and had the same
    # timestamp as the stored actdict, so that actdict can be
    # reused.
    if self.__actdict and otimestamp == self.__actdict_timestamp:
        return self.__actdict

    sversion, stimestamp = self._get_stripped_actions_file(
        internal=True)

    # If we recognize neither file's version or their timestamps
    # don't match, then we blow them away and try again.
    if oversion != "VERSION 2" or sversion != "VERSION 1" or \
        stimestamp != otimestamp:
        of.close()
        actdict, otimestamp = self._create_fast_lookups()
        assert actdict is not None
        self.__actdict = actdict
        self.__actdict_timestamp = otimestamp
        return actdict

    # At this point, the original actions.offsets file existed, no
    # actdict was saved in the image, the versions matched what was
    # expected, and the timestamps of the actions.offsets and
    # actions.stripped files matched, so the actions.offsets file is
    # parsed to generate actdict.
    actdict = {}

    for line in of:
        actname, offset, cnt, key_attr = \
            line.rstrip().split(None, 3)
        off = int(offset)
        actdict[(actname, key_attr)] = (off, int(cnt))

        # This is a tight loop, so try to avoid burning
        # CPU calling into the progress tracker excessively.
        # Since we are already using the offset, we use that
        # to damp calls back into the progress tracker.
        if off % 500 == 0:
            progtrack.plan_add_progress(
                progtrack.PLAN_ACTION_CONFLICT)

    of.close()
    self.__actdict = actdict
    self.__actdict_timestamp = otimestamp
    return actdict
3755 def _get_stripped_actions_file(self, internal=False):
3756 """Open the actions file described in _create_fast_lookups() and
3757 return the corresponding file object."""
3759 sf = file(os.path.join(self.__action_cache_dir,
3760 "actions.stripped"), "rb")
3761 sversion = sf.readline().rstrip()
3762 stimestamp = sf.readline().rstrip()
3763 if internal:
3764 sf.close()
3765 return sversion, stimestamp
3767 return sf
3769 def _load_conflicting_keys(self):
3770 """Load the list of keys which have conflicting actions in the
3771 existing image. If no such list exists, then return None."""
3773 pth = os.path.join(self.__action_cache_dir, "keys.conflicting")
3774 try:
3775 with open(pth, "rb") as fh:
3776 version = fh.readline().rstrip()
3777 if version != "VERSION 1":
3778 return None
3779 return set(l.rstrip() for l in fh)
3780 except EnvironmentError as e:
3781 if e.errno == errno.ENOENT:
3782 return None
3783 raise
def gen_installed_actions_bytype(self, atype, implicit_dirs=False):
    """Iterates through the installed actions of type 'atype',
    yielding (action, pfmri) pairs.  If 'implicit_dirs' is True and
    'atype' is 'dir', directories only implicitly defined by other
    filesystem actions are included as well."""

    # Implicit directories only make sense for directory actions.
    implicit_dirs = implicit_dirs and atype == "dir"

    excludes = self.list_excludes()

    for pfmri in self.gen_installed_pkgs():
        mfst = self.get_manifest(pfmri)
        seen_dirs = set()
        for act in mfst.gen_actions_by_type(atype,
            excludes=excludes):
            if implicit_dirs:
                seen_dirs.add(act.attrs["path"])
            yield act, pfmri
        if implicit_dirs:
            da = pkg.actions.directory.DirectoryAction
            for d in mfst.get_directories(excludes):
                if d not in seen_dirs:
                    yield da(path=d, implicit="true"), pfmri
def get_installed_pubs(self):
    """Return a set containing the prefixes of all publishers with
    installed packages."""
    installed = self.get_catalog(self.IMG_CATALOG_INSTALLED)
    return installed.publishers()
def strtofmri(self, myfmri):
    """Return a PkgFmri object constructed from the string 'myfmri'."""
    fmri_obj = pkg.fmri.PkgFmri(myfmri)
    return fmri_obj
def strtomatchingfmri(self, myfmri):
    """Return a MatchingPkgFmri object constructed from 'myfmri'."""
    fmri_obj = pkg.fmri.MatchingPkgFmri(myfmri)
    return fmri_obj
def get_user_by_name(self, name):
    """Return the uid for user 'name', consulting the image's cached
    user table first and falling back to the system databases."""
    cached = self._usersbyname.get(name)
    if cached is not None:
        return cached
    # Not cached; look it up relative to the image root unless this
    # is a user image.
    return portable.get_user_by_name(name, self.root,
        self.type != IMG_USER)
def get_name_by_uid(self, uid, returnuid = False):
    """Return the user name for 'uid'.  If the uid is unknown and
    'returnuid' is true, return the uid itself; otherwise re-raise
    the KeyError."""
    # XXX What to do about IMG_PARTIAL?
    try:
        return portable.get_name_by_uid(uid, self.root,
            self.type != IMG_USER)
    except KeyError:
        if not returnuid:
            raise
        return uid
def get_group_by_name(self, name):
    """Return the gid for group 'name', consulting the image's cached
    group table first and falling back to the system databases."""
    cached = self._groupsbyname.get(name)
    if cached is not None:
        return cached
    # Not cached; look it up relative to the image root unless this
    # is a user image.
    return portable.get_group_by_name(name, self.root,
        self.type != IMG_USER)
def get_name_by_gid(self, gid, returngid = False):
    """Return the group name for 'gid'.  If the gid is unknown and
    'returngid' is true, return the gid itself; otherwise re-raise
    the KeyError."""
    try:
        return portable.get_name_by_gid(gid, self.root,
            self.type != IMG_USER)
    except KeyError:
        if not returngid:
            raise
        return gid
def update_index_dir(self, postfix="index"):
    """Since the index directory will not reliably be updated when
    the image root is, this should be called prior to using the
    index directory; it recomputes self.index_dir."""
    # Current-format images keep the index under the cache directory.
    if self.version == self.CURRENT_VERSION:
        base = os.path.join(self.imgdir, "cache")
    else:
        base = self.imgdir
    self.index_dir = os.path.join(base, postfix)
def cleanup_downloads(self):
    """Clean up any downloads that were in progress but that
    did not successfully finish."""
    # Best-effort removal; errors are deliberately ignored.
    shutil.rmtree(self._incoming_cache_dir, ignore_errors=True)
def cleanup_cached_content(self, progtrack=None):
    """Delete the directory that stores all of our cached
    downloaded content.  This may take a while for a large
    directory hierarchy.  Don't clean up caches if the
    user overrode the underlying setting using PKG_CACHEDIR or
    PKG_CACHEROOT."""

    if not self.cfg.get_policy(imageconfig.FLUSH_CONTENT_CACHE):
        return

    # Collect writable cache directories that weren't supplied by
    # the user.
    cdirs = []
    for path, readonly, pub, layout in self.get_cachedirs():
        if readonly:
            continue
        if self.__user_cache_dir and \
            path.startswith(self.__user_cache_dir):
            continue
        cdirs.append(path)

    if not cdirs:
        return

    if not progtrack:
        progtrack = progress.NullProgressTracker()

    # 'Updating package cache'
    progtrack.job_start(progtrack.JOB_PKG_CACHE, goal=len(cdirs))
    for path in cdirs:
        shutil.rmtree(path, True)
        progtrack.job_add_progress(progtrack.JOB_PKG_CACHE)
    progtrack.job_done(progtrack.JOB_PKG_CACHE)
def salvage(self, path, full_path=False):
    """Called when unexpected file or directory is found during
    package operations; moves the item into the image's lost+found
    area and returns the path of the salvage directory where it was
    stored.  Can be called with either image-relative or absolute
    (current) path to file/dir to be salvaged.  If full_path is
    False (the default), remove the current mountpoint of the image
    from the returned directory path."""

    # This ensures that if the path is already rooted in the image,
    # that it will be stored in lost+found (due to os.path.join
    # behaviour with absolute path components).
    if path.startswith(self.root):
        path = path.replace(self.root, "", 1)

    if os.path.isabs(path):
        # If for some reason the path wasn't rooted in the
        # image, but it is an absolute one, then strip the
        # absolute part so that it will be stored in lost+found
        # (due to os.path.join behaviour with absolute path
        # components).
        path = os.path.splitdrive(path)[-1].lstrip(os.path.sep)

    sdir = os.path.normpath(
        os.path.join(self.imgdir, "lost+found",
        path + "-" + time.strftime("%Y%m%dT%H%M%SZ")))

    parent = os.path.dirname(sdir)
    if not os.path.exists(parent):
        misc.makedirs(parent)

    orig = os.path.normpath(os.path.join(self.root, path))

    misc.move(orig, sdir)
    # Remove current mountpoint from sdir.  The previous code
    # discarded the result of str.replace() (strings are
    # immutable), so the mountpoint was never actually stripped;
    # assign the result so the documented contract holds.
    if not full_path:
        sdir = sdir.replace(self.root, "", 1)
    return sdir
def recover(self, local_spath, full_dest_path):
    """Called when recovering directory contents to implement
    "salvage-from" directive... full_dest_path must exist."""
    src_root = os.path.normpath(os.path.join(self.root, local_spath))
    for entry in os.listdir(src_root):
        misc.move(os.path.join(src_root, entry),
            os.path.join(full_dest_path, entry))
def temporary_dir(self):
    """Create a temp directory under the image directory for various
    purposes.  If the process is unable to create a directory in the
    image's temporary directory, a replacement location is found."""

    try:
        misc.makedirs(self.__tmpdir)
    except (apx.PermissionsException,
        apx.ReadOnlyFileSystemException):
        # The image tmpdir is unusable; switch to a fresh
        # system temp directory cleaned up at exit and retry.
        self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-")
        atexit.register(shutil.rmtree,
            self.__tmpdir, ignore_errors=True)
        return self.temporary_dir()

    try:
        tdir = tempfile.mkdtemp(dir=self.__tmpdir)
        # Force standard mode.
        os.chmod(tdir, misc.PKG_DIR_MODE)
        return tdir
    except EnvironmentError as e:
        if e.errno in (errno.EACCES, errno.EROFS):
            self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-")
            atexit.register(shutil.rmtree,
                self.__tmpdir, ignore_errors=True)
            return self.temporary_dir()
        raise apx._convert_error(e)
def temporary_file(self, close=True):
        """Create a temporary file under the image directory for
        various purposes.  If 'close' is True, close the file
        descriptor and return only the name; otherwise return
        (fd, name).  If a file cannot be created in the image's
        temporary directory, a replacement location is used."""
        try:
                misc.makedirs(self.__tmpdir)
        except (apx.PermissionsException,
            apx.ReadOnlyFileSystemException):
                # Image tmpdir unusable; fall back to a system
                # temporary directory cleaned up at exit.
                self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-")
                atexit.register(shutil.rmtree,
                    self.__tmpdir, ignore_errors=True)
                return self.temporary_file(close=close)

        try:
                fd, name = tempfile.mkstemp(dir=self.__tmpdir)
                if close:
                        os.close(fd)
        except EnvironmentError as e:
                if e.errno not in (errno.EACCES, errno.EROFS):
                        raise apx._convert_error(e)
                # Permission/read-only failure: retry with a
                # replacement temporary directory.
                self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-")
                atexit.register(shutil.rmtree,
                    self.__tmpdir, ignore_errors=True)
                return self.temporary_file(close=close)

        if close:
                return name
        return fd, name
def __filter_install_matches(self, matches):
        """Attempt to eliminate redundant matches found during
        packaging operations:

        * First, stems of installed packages for publishers that are
          now unknown (no longer present in the image configuration)
          are dropped.

        * Second, if multiple matches remain, stems of installed
          packages that are not presently in the corresponding
          publisher's catalog are dropped.

        * Finally, if multiple matches still remain and at least one
          stem is installed, all stems not in state PKG_STATE_INSTALLED
          are dropped.

        Returns a tuple of (filtered matches, set of unique stems)."""
        # Pass 1: keep only matches whose publisher is still
        # configured in the image.
        known_pubs = set(
            p.prefix for p in self.get_publishers().values())
        olist = [(m, st) for m, st in matches
            if m.publisher in known_pubs]
        onames = set(m.get_pkg_stem() for m, st in olist)

        # Pass 2: with multiple candidates left, drop matches whose
        # publisher no longer carries the FMRI in its catalog, noting
        # whether any surviving match is installed.
        have_installed = False
        if len(onames) > 1:
                kept = []
                kept_names = set()
                for m, st in olist:
                        if not st["in_catalog"]:
                                continue
                        if st["state"] == pkgdefs.PKG_STATE_INSTALLED:
                                have_installed = True
                        kept_names.add(m.get_pkg_stem())
                        kept.append((m, st))
                olist, onames = kept, kept_names

        # Pass 3: if an installed stem survives and there is still
        # ambiguity, keep only installed stems.
        if have_installed and len(onames) > 1:
                kept = []
                kept_names = set()
                for m, st in olist:
                        if st["state"] == pkgdefs.PKG_STATE_INSTALLED:
                                kept_names.add(m.get_pkg_stem())
                                kept.append((m, st))
                olist, onames = kept, kept_names

        return olist, onames
def avoid_pkgs(self, pat_list, progtrack, check_cancel):
        """Add the specified patterns to the avoid set; matching is by
        package name only (versions are ignored)."""
        with self.locked_op("avoid"):
                ip = imageplan.ImagePlan
                newly_avoided = set(ip.match_user_stems(
                    self, pat_list, ip.MATCH_UNINSTALLED))
                self._avoid_set_save(
                    self.avoid_set_get() | newly_avoided)
def unavoid_pkgs(self, pat_list, progtrack, check_cancel):
        """Remove the specified patterns from the avoid set; matching
        is by package name only (versions are ignored)."""
        with self.locked_op("unavoid"):
                ip = imageplan.ImagePlan
                unavoid_set = set(ip.match_user_stems(
                    self, pat_list, ip.MATCH_ALL))
                current_set = self.avoid_set_get()
                not_avoided = unavoid_set - current_set
                if not_avoided:
                        raise apx.PlanCreationException(
                            not_avoided=not_avoided)

                # Don't allow unavoid if removal of the package from
                # the avoid list would require the package to be
                # installed, as this would invalidate current image
                # state.  If the package is already installed though,
                # it doesn't really matter if it's a target of an
                # avoid or not.
                installed_set = set(
                    f.pkg_name for f in self.gen_installed_pkgs())
                would_install = [
                    a
                    for f, a in self.gen_tracked_stems()
                    if a in unavoid_set and a not in installed_set
                ]
                if would_install:
                        raise apx.PlanCreationException(
                            would_install=would_install)

                self._avoid_set_save(current_set - unavoid_set)
def get_avoid_dict(self):
        """Return a dict mapping each avoided stem to the list of
        package names that have group dependencies on it."""
        result = dict((stem, []) for stem in self.avoid_set_get())
        for fmri, group in self.gen_tracked_stems():
                if group in result:
                        result[group].append(fmri.pkg_name)
        return result
def freeze_pkgs(self, pat_list, progtrack, check_cancel, dry_run,
    comment):
        """Freeze the specified packages; matching is by package name.

        The 'pat_list' parameter contains the list of patterns of
        packages to freeze.

        The 'progtrack' parameter contains the progress tracker for
        this operation.

        The 'check_cancel' parameter contains a function to call to
        check if the operation has been canceled.

        The 'dry_run' parameter controls whether packages are actually
        frozen.

        The 'comment' parameter contains the comment, if any, which
        will be associated with the packages that are frozen.

        Returns the frozen FMRIs (dry run or not).
        """

        def __make_publisherless_fmri(pat):
                # Strip the publisher so freezes apply regardless of
                # which publisher supplies the package.
                p = pkg.fmri.MatchingPkgFmri(pat)
                p.publisher = None
                return p

        def __calc_frozen():
                stems_and_pats = imageplan.ImagePlan.freeze_pkgs_match(
                    self, pat_list)
                # .items() instead of .iteritems(): identical here and
                # keeps the code working under Python 3.
                return dict((s, __make_publisherless_fmri(p))
                    for s, p in stems_and_pats.items())

        if dry_run:
                return __calc_frozen().values()
        with self.locked_op("freeze"):
                stems_and_pats = __calc_frozen()
                # Get existing dictionary of frozen packages and merge
                # in the new freezes and comment.
                d = self.__freeze_dict_load()
                timestamp = calendar.timegm(time.gmtime())
                d.update((s, (str(p), comment, timestamp))
                    for s, p in stems_and_pats.items())
                self._freeze_dict_save(d)
                return stems_and_pats.values()
def unfreeze_pkgs(self, pat_list, progtrack, check_cancel, dry_run):
        """Unfreeze the specified packages; matching is by package
        name, versions are ignored.

        The 'pat_list' parameter contains the list of patterns of
        packages to unfreeze.

        The 'progtrack' parameter contains the progress tracker for
        this operation.

        The 'check_cancel' parameter contains a function to call to
        check if the operation has been canceled.

        The 'dry_run' parameter controls whether packages are actually
        unfrozen."""

        def __calc_unfrozen():
                # Match the user's patterns against the currently
                # frozen packages; return the matched stems plus the
                # full frozen dictionary.
                frozen = self.__freeze_dict_load()
                ip = imageplan.ImagePlan
                matched = set(ip.match_user_stems(self, pat_list,
                    ip.MATCH_ALL, raise_unmatched=False,
                    universe=[(None, stem) for stem in frozen.keys()]))
                return matched, frozen

        if dry_run:
                return __calc_unfrozen()[0]
        # Note: shares the "freeze" lock with freeze_pkgs.
        with self.locked_op("freeze"):
                unfrozen_set, frozen = __calc_unfrozen()
                # Drop each matched stem from the frozen set.
                for stem in unfrozen_set:
                        frozen.pop(stem, None)
                self._freeze_dict_save(frozen)
                return unfrozen_set
def __call_imageplan_evaluate(self, ip):
        """Evaluate the given image plan, publishing start/end state
        to history when an operation is in progress, and store the
        plan on the image."""
        # A plan can be requested without actually performing an
        # operation on the image.
        if self.history.operation_name:
                self.history.operation_start_state = ip.get_plan()

        try:
                ip.evaluate()
        except apx.ConflictingActionErrors:
                # Image plan evaluation can fail because of duplicate
                # action discovery, but we still want to be able to
                # display and log the solved FMRI changes.
                self.imageplan = ip
                if self.history.operation_name:
                        self.history.operation_end_state = (
                            "Unevaluated: merged plan had errors\n"
                            + ip.get_plan(full=False))
                raise

        self.imageplan = ip
        if self.history.operation_name:
                self.history.operation_end_state = ip.get_plan(
                    full=False)
def __make_plan_common(self, _op, _progtrack, _check_cancel,
    _noexecute, _ip_noop=False, **kwargs):
        """Private helper function to perform base plan creation and
        cleanup.

        '_op' is one of the pkgdefs.API_OP_* constants and selects
        which planning routine is invoked; all remaining keyword
        arguments are passed through to that routine.  Raises
        RuntimeError for an unknown '_op'.
        """

        if DebugValues.get_value("simulate-plan-hang"):
                # If pkg5.hang file is present in image dir, then
                # sleep after loading configuration until file is
                # gone.  This is used by the test suite for signal
                # handling testing, etc.
                hang_file = os.path.join(self.imgdir, "pkg5.hang")
                with open(hang_file, "w") as f:
                        f.write(str(os.getpid()))

                while os.path.exists(hang_file):
                        time.sleep(1)

        # Allow garbage collection of previous plan.
        self.imageplan = None

        ip = imageplan.ImagePlan(self, _op, _progtrack, _check_cancel,
            noexecute=_noexecute)

        # Always start with most current (on-disk) state information.
        self.__init_catalogs()

        try:
                try:
                        if _ip_noop:
                                ip.plan_noop(**kwargs)
                        elif _op in (
                            pkgdefs.API_OP_ATTACH,
                            pkgdefs.API_OP_DETACH,
                            pkgdefs.API_OP_SYNC):
                                ip.plan_sync(**kwargs)
                        elif _op in (
                            pkgdefs.API_OP_CHANGE_FACET,
                            pkgdefs.API_OP_CHANGE_VARIANT):
                                ip.plan_change_varcets(**kwargs)
                        elif _op == pkgdefs.API_OP_DEHYDRATE:
                                ip.plan_dehydrate(**kwargs)
                        elif _op == pkgdefs.API_OP_INSTALL:
                                ip.plan_install(**kwargs)
                        elif _op == pkgdefs.API_OP_EXACT_INSTALL:
                                ip.plan_exact_install(**kwargs)
                        elif _op == pkgdefs.API_OP_FIX:
                                ip.plan_fix(**kwargs)
                        elif _op == pkgdefs.API_OP_REHYDRATE:
                                ip.plan_rehydrate(**kwargs)
                        elif _op == pkgdefs.API_OP_REVERT:
                                ip.plan_revert(**kwargs)
                        elif _op == pkgdefs.API_OP_SET_MEDIATOR:
                                ip.plan_set_mediators(**kwargs)
                        elif _op == pkgdefs.API_OP_UNINSTALL:
                                ip.plan_uninstall(**kwargs)
                        elif _op == pkgdefs.API_OP_UPDATE:
                                ip.plan_update(**kwargs)
                        else:
                                raise RuntimeError(
                                    "Unknown api op: {0}".format(_op))
                        # Re-raise API errors as-is; wrap low-level
                        # action errors so callers see ApiExceptions.
                except apx.ActionExecutionError:
                        raise
                except pkg.actions.ActionError as e:
                        raise apx.InvalidPackageErrors([e])
                except apx.ApiException:
                        raise
                try:
                        self.__call_imageplan_evaluate(ip)
                except apx.ActionExecutionError:
                        raise
                except pkg.actions.ActionError as e:
                        raise apx.InvalidPackageErrors([e])
        finally:
                self.__cleanup_alt_pkg_certs()
def make_install_plan(self, op, progtrack, check_cancel,
    noexecute, pkgs_inst=None, reject_list=misc.EmptyI):
        """Take a list of packages, specified in 'pkgs_inst', and
        attempt to assemble an appropriate image plan.  This is a
        helper routine for some common operations in the client."""
        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            pkgs_inst=pkgs_inst, reject_list=reject_list)
        progtrack.plan_all_done()
def make_change_varcets_plan(self, op, progtrack, check_cancel,
    noexecute, facets=None, reject_list=misc.EmptyI,
    variants=None):
        """Take a list of variants and/or facets and attempt to
        assemble an image plan which changes them.  This is a helper
        routine for some common operations in the client."""

        progtrack.plan_all_start()
        # Compute dict of changing variants.  .items() instead of
        # .iteritems(): identical result here and keeps the code
        # working under Python 3.
        if variants:
                new = set(variants.items())
                cur = set(self.cfg.variants.items())
                variants = dict(new - cur)
        elif facets:
                # Merge requested facet changes into the current
                # facets; a value of None removes the facet.
                new_facets = self.get_facets()
                for f in facets:
                        if facets[f] is None:
                                new_facets.pop(f, None)
                        else:
                                new_facets[f] = facets[f]
                facets = new_facets

        self.__make_plan_common(op, progtrack, check_cancel,
            noexecute, new_variants=variants, new_facets=facets,
            reject_list=reject_list)

        progtrack.plan_all_done()
def make_set_mediators_plan(self, op, progtrack, check_cancel,
    noexecute, mediators):
        """Take a dictionary of mediators and attempt to assemble an
        appropriate image plan to set or revert them based on the
        provided version and implementation values.  This is a helper
        routine for some common operations in the client.

        Raises PlanCreationException if any requested mediation is
        invalid.
        """

        progtrack.plan_all_start()

        # Compute dict of changing mediators.
        new_mediators = copy.deepcopy(mediators)
        old_mediators = self.cfg.mediators
        invalid_mediations = collections.defaultdict(dict)
        # Iterate over a snapshot of the keys because entries may be
        # deleted from new_mediators inside the loop (required for
        # Python 3, where keys() is a live view).
        for m in list(new_mediators.keys()):
                new_values = new_mediators[m]
                if not new_values:
                        if m not in old_mediators:
                                # Nothing to revert.
                                del new_mediators[m]
                                continue

                        # Revert mediator to defaults.
                        new_mediators[m] = {}
                        continue

                # Validate mediator, provided version, implementation,
                # and source.
                valid, error = med.valid_mediator(m)
                if not valid:
                        invalid_mediations[m]["mediator"] = (m, error)

                med_version = new_values.get("version")
                if med_version:
                        valid, error = med.valid_mediator_version(
                            med_version)
                        if valid:
                                new_mediators[m]["version"] = \
                                    pkg.version.Version(med_version)
                        else:
                                invalid_mediations[m]["version"] = \
                                    (med_version, error)

                med_impl = new_values.get("implementation")
                if med_impl:
                        valid, error = med.valid_mediator_implementation(
                            med_impl, allow_empty_version=True)
                        if not valid:
                                # Record under "implementation": the
                                # old code stored this under "version",
                                # mislabeling the failure and clobbering
                                # any version error for this mediator.
                                invalid_mediations[m]["implementation"] = \
                                    (med_impl, error)

        if invalid_mediations:
                raise apx.PlanCreationException(
                    invalid_mediations=invalid_mediations)

        self.__make_plan_common(op, progtrack, check_cancel,
            noexecute, new_mediators=new_mediators)

        progtrack.plan_all_done()
def make_sync_plan(self, op, progtrack, check_cancel,
    noexecute, li_pkg_updates=True, reject_list=misc.EmptyI):
        """Attempt to create an appropriate image plan to bring an
        image in sync with its linked image constraints.  This is a
        helper routine for some common operations in the client."""
        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            li_pkg_updates=li_pkg_updates, reject_list=reject_list)
        progtrack.plan_all_done()
def make_uninstall_plan(self, op, progtrack, check_cancel,
    ignore_missing, noexecute, pkgs_to_uninstall):
        """Create an uninstall plan to remove the packages named in
        'pkgs_to_uninstall'."""
        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            ignore_missing=ignore_missing,
            pkgs_to_uninstall=pkgs_to_uninstall)
        progtrack.plan_all_done()
def make_update_plan(self, op, progtrack, check_cancel,
    noexecute, ignore_missing=False, pkgs_update=None,
    reject_list=misc.EmptyI):
        """Create a plan to update all packages, or the specific ones
        given in 'pkgs_update', as far as possible.  This is a helper
        routine for some common operations in the client."""
        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            ignore_missing=ignore_missing, pkgs_update=pkgs_update,
            reject_list=reject_list)
        progtrack.plan_all_done()
def make_revert_plan(self, op, progtrack, check_cancel,
    noexecute, args, tagged):
        """Revert the files named in 'args' — or, when 'tagged' is
        set, all files tagged as specified in 'args' — to their
        manifest definitions."""
        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            args=args, tagged=tagged)
        progtrack.plan_all_done()
def make_dehydrate_plan(self, op, progtrack, check_cancel, noexecute,
    publishers):
        """Create a plan that removes non-editable files and hardlinks
        belonging to the given publishers from the image."""
        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            publishers=publishers)
        progtrack.plan_all_done()
def make_rehydrate_plan(self, op, progtrack, check_cancel, noexecute,
    publishers):
        """Create a plan that reinstalls non-editable files and
        hardlinks of the given publishers to a dehydrated image."""
        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            publishers=publishers)
        progtrack.plan_all_done()
def make_fix_plan(self, op, progtrack, check_cancel, noexecute, args):
        """Create an image plan to fix (verify and repair) the
        image."""
        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            args=args)
        progtrack.plan_all_done()
def make_noop_plan(self, op, progtrack, check_cancel,
    noexecute):
        """Create an image plan that doesn't update the image in any
        way."""
        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            _ip_noop=True)
        progtrack.plan_all_done()
def ipkg_is_up_to_date(self, check_cancel, noexecute,
    refresh_allowed=True, progtrack=None):
        """Test whether the packaging system is updated to the latest
        version known to be available for this image.

        Returns True when no newer version of package/pkg can be
        installed; otherwise plans an install of pkg:/package/pkg and
        returns whether that plan has nothing to do.
        """

        # This routine makes the distinction between the "target
        # image", which will be altered, and the "running image",
        # which is to say whatever image appears to contain the
        # version of the pkg command we're running.

        # There are two relevant cases here:
        #     1) Packaging code and image we're updating are the same
        #        image.  (i.e. 'pkg update')

        #     2) Packaging code's image and the image we're updating
        #        are different (i.e. 'pkg update -R')

        # In general, we care about getting the user to run the
        # most recent packaging code available for their build.  So,
        # if we're not in the liveroot case, we create a new image
        # which represents "/" on the system.

        if not progtrack:
                progtrack = progress.NullProgressTracker()

        img = self

        if self.__cmddir and not img.is_liveroot():
                # Find the path to ourselves, and use that
                # as a way to locate the image we're in.  It's
                # not perfect -- we could be in a developer's
                # workspace, for example.
                newimg = Image(self.__cmddir,
                    allow_ondisk_upgrade=False, progtrack=progtrack,
                    cmdpath=self.cmdpath)
                useimg = True
                if refresh_allowed:
                        # If refreshing publisher metadata is allowed,
                        # then perform a refresh so that a new
                        # packaging system package can be discovered.
                        newimg.lock(allow_unprivileged=True)
                        try:
                                newimg.refresh_publishers(
                                    progtrack=progtrack)
                        except (apx.ImageFormatUpdateNeeded,
                            apx.PermissionsException):
                                # Can't use the image to perform an
                                # update check and it would be wrong
                                # to prevent the operation from
                                # continuing in these cases.
                                useimg = False
                        except apx.CatalogRefreshException as cre:
                                cre.errmessage = \
                                    _("pkg(5) update check failed.")
                                raise
                        finally:
                                newimg.unlock()

                if useimg:
                        img = newimg

        # If no version of the package system is installed or a newer
        # version isn't available, then the client is "up-to-date".
        pfmri = img.get_version_installed(img.strtofmri("package/pkg"))
        if not pfmri or \
            not pkgdefs.PKG_STATE_UPGRADABLE in img.get_pkg_state(pfmri):
                return True

        inc_fmri = img.get_version_installed(img.strtofmri(
            "consolidation/ips/ips-incorporation"))
        if inc_fmri:
                # If the ips-incorporation is installed (it should be
                # since package/pkg depends on it), then we can
                # bypass the solver and plan evaluation if none of the
                # newer versions are allowed by the incorporation.

                # Find the version at which package/pkg is
                # incorporated.
                cat = img.get_catalog(img.IMG_CATALOG_KNOWN)
                inc_ver = None
                for act in cat.get_entry_actions(inc_fmri, [cat.DEPENDENCY],
                    excludes=img.list_excludes()):
                        if act.name == "depend" and \
                            act.attrs["type"] == "incorporate" and \
                            act.attrs["fmri"].startswith("package/pkg"):
                                inc_ver = img.strtofmri(
                                    act.attrs["fmri"]).version
                                break

                if inc_ver:
                        for ver, fmris in cat.fmris_by_version(
                            "package/pkg"):
                                if ver != pfmri.version and \
                                    ver.is_successor(inc_ver,
                                    pkg.version.CONSTRAINT_AUTO):
                                        break
                        else:
                                # No version is newer than installed
                                # and satisfied incorporation
                                # constraint.
                                return True

        # XXX call to progress tracker that the package is being
        # refreshed
        img.make_install_plan(pkgdefs.API_OP_INSTALL, progtrack,
            check_cancel, noexecute, pkgs_inst=["pkg:/package/pkg"])

        return img.imageplan.nothingtodo()
# The avoid set implementation uses simplejson to store a set of
# pkg_stems being avoided, and a set of tracked stems that are
# obsolete.
#
# On-disk format of the "avoid_set" state file:
#     (version, dict((pkg stem, "avoid" or "obsolete")))

# Version number of the on-disk avoid_set format above.
__AVOID_SET_VERSION = 1
def avoid_set_get(self):
        """Return a copy of the set of avoided package stems."""
        return set(self.__avoid_set)
def obsolete_set_get(self):
        """Return a copy of the set of tracked obsolete package
        stems."""
        return set(self.__group_obsolete)
def __avoid_set_load(self):
        """Load the avoid set from the image state directory.

        Populates self.__avoid_set and self.__group_obsolete.  If the
        state file is missing or corrupt, both sets are left empty and
        the avoid set is marked altered so it is rewritten on the next
        save."""
        state_file = os.path.join(self._statedir, "avoid_set")
        self.__avoid_set = set()
        self.__group_obsolete = set()
        if not os.path.isfile(state_file):
                self.__avoid_set_altered = True
                return

        # open() in a context manager: the old file() call leaked the
        # descriptor and file() does not exist under Python 3.
        with open(state_file) as sf:
                version, d = json.load(sf)
        assert version == self.__AVOID_SET_VERSION
        for stem in d:
                if d[stem] == "avoid":
                        self.__avoid_set.add(stem)
                elif d[stem] == "obsolete":
                        self.__group_obsolete.add(stem)
                else:
                        # Unknown marker: treat the list as corrupt,
                        # reset and mark altered so it gets rewritten.
                        # (warning() -- warn() is deprecated.)
                        logger.warning(
                            "Corrupted avoid list - ignoring")
                        self.__avoid_set = set()
                        self.__group_obsolete = set()
                        self.__avoid_set_altered = True
def _avoid_set_save(self, new_set=None, obsolete=None):
        """Store the avoid set to the image state directory.

        'new_set', if given, replaces the avoid set; 'obsolete', if
        given, replaces the tracked obsolete set.  The file is only
        written when the in-memory state has been altered.  Failure to
        write is logged, not raised (best effort)."""
        if new_set is not None:
                self.__avoid_set_altered = True
                self.__avoid_set = new_set

        if obsolete is not None:
                self.__group_obsolete = obsolete
                self.__avoid_set_altered = True

        if not self.__avoid_set_altered:
                return

        state_file = os.path.join(self._statedir, "avoid_set")
        tmp_file = os.path.join(self._statedir, "avoid_set.new")

        d = dict((a, "avoid") for a in self.__avoid_set)
        d.update((a, "obsolete") for a in self.__group_obsolete)

        try:
                # Write to a temporary file and rename into place so a
                # failed write can't corrupt the existing state file.
                # with open(...): the old file() call leaked the
                # descriptor when json.dump() raised, and file() does
                # not exist under Python 3.
                with open(tmp_file, "w") as tf:
                        json.dump((self.__AVOID_SET_VERSION, d), tf)
                portable.rename(tmp_file, state_file)
        except Exception as e:
                # Deliberately broad: persisting the avoid list is
                # best-effort.  (warning() -- warn() is deprecated.)
                logger.warning("Cannot save avoid list: {0}".format(
                    str(e)))
                return

        self.__avoid_set_altered = False
# The frozen dict implementation uses simplejson to store a dictionary
# of pkg_stems that are frozen, the versions at which they're frozen,
# and the reason, if given, why each package was frozen.
#
# On-disk format of the "frozen_dict" state file:
#     (version, dict((pkg stem, (fmri, comment, timestamp))))

# Version number of the on-disk frozen_dict format above.
__FROZEN_DICT_VERSION = 1
def get_frozen_list(self):
        """Return a list of (fmri, comment, timestamp) tuples, one per
        frozen package."""
        return [
            (pkg.fmri.MatchingPkgFmri(fmri_str), comment, tstamp)
            for fmri_str, comment, tstamp
            in self.__freeze_dict_load().values()
        ]
def __freeze_dict_load(self):
        """Load and return the dictionary describing the current state
        of frozen packages (pkg stem -> (fmri, comment, timestamp)),
        or an empty dict when no freeze state exists.

        Raises InvalidFreezeFile when the file cannot be parsed and
        UnknownFreezeFileVersion on a format version mismatch."""
        state_file = os.path.join(self._statedir, "frozen_dict")
        if not os.path.isfile(state_file):
                return {}
        try:
                # open() in a context manager: the old
                # json.load(file(...)) call leaked the descriptor and
                # file() does not exist under Python 3.
                with open(state_file) as sf:
                        version, d = json.load(sf)
        except EnvironmentError as e:
                raise apx._convert_error(e)
        except ValueError:
                raise apx.InvalidFreezeFile(state_file)
        if version != self.__FROZEN_DICT_VERSION:
                raise apx.UnknownFreezeFileVersion(
                    version, self.__FROZEN_DICT_VERSION, state_file)
        return d
def _freeze_dict_save(self, new_dict):
        """Persist the dictionary of frozen packages, then rebuild the
        image catalogs so package states reflect the change."""
        state_file = os.path.join(self._statedir, "frozen_dict")
        tmp_file = os.path.join(self._statedir, "frozen_dict.new")
        try:
                # Write to a temporary file and rename into place so a
                # failed write can't corrupt the existing state file.
                with open(tmp_file, "w") as tf:
                        json.dump(
                            (self.__FROZEN_DICT_VERSION, new_dict), tf)
                portable.rename(tmp_file, state_file)
        except EnvironmentError as e:
                raise apx._convert_error(e)
        self.__rebuild_image_catalogs()
@staticmethod
def get_dehydrated_exclude_func(dehydrated_pubs):
        """Return a boolean function, for the pkg(5) exclude
        mechanism, that decides whether an action may be installed
        based on whether its publisher is going to be dehydrated or
        is currently dehydrated."""

        # A closure is used so the set of dehydrated publishers can be
        # accessed by the returned function.
        def __allow_action_dehydrate(act, publisher):
                if publisher not in dehydrated_pubs:
                        # Actions from publishers that are not
                        # dehydrated are always allowed.
                        return True

                name = act.name
                if name == "hardlink":
                        # Hardlinks are never kept when dehydrated.
                        return False
                if name != "file":
                        # Every other action type is unaffected.
                        return True

                # Files are kept only when explicitly exempted or
                # editable (preserve/overlay).
                attrs = act.attrs
                if attrs.get("dehydrate") == "false":
                        return True
                if "preserve" in attrs or "overlay" in attrs:
                        return True
                return False

        return __allow_action_dehydrate