5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
24 # Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
37 import simplejson
as json
44 from contextlib
import contextmanager
45 from pkg
.client
import global_settings
46 logger
= global_settings
.logger
47 from cryptography
import x509
48 from cryptography
.hazmat
.backends
import default_backend
52 import pkg
.client
.api_errors
as apx
53 import pkg
.client
.bootenv
as bootenv
54 import pkg
.client
.history
as history
55 import pkg
.client
.imageconfig
as imageconfig
56 import pkg
.client
.imageplan
as imageplan
57 import pkg
.client
.linkedimage
as li
58 import pkg
.client
.pkgdefs
as pkgdefs
59 import pkg
.client
.pkgplan
as pkgplan
60 import pkg
.client
.plandesc
as plandesc
61 import pkg
.client
.progress
as progress
62 import pkg
.client
.publisher
as publisher
63 import pkg
.client
.sigpolicy
as sigpolicy
64 import pkg
.client
.transport
.transport
as transport
65 import pkg
.config
as cfg
66 import pkg
.file_layout
.layout
as fl
68 import pkg
.lockfile
as lockfile
69 import pkg
.manifest
as manifest
70 import pkg
.mediator
as med
71 import pkg
.misc
as misc
73 import pkg
.pkgsubprocess
as subprocess
74 import pkg
.portable
as portable
75 import pkg
.server
.catalog
79 from pkg
.client
.debugvalues
import DebugValues
80 from pkg
.client
.imagetypes
import IMG_USER
, IMG_ENTIRE
81 from pkg
.client
.transport
.exception
import InvalidContentException
82 from pkg
.misc
import EmptyI
, EmptyDict
84 img_user_prefix
= ".org.opensolaris,pkg"
85 img_root_prefix
= "var/pkg"
87 IMG_PUB_DIR
= "publisher"
90 """An Image object is a directory tree containing the laid-down contents
91 of a self-consistent graph of Packages.
93 An Image has a root path.
95 An Image of type IMG_ENTIRE does not have a parent Image. Other Image
96 types must have a parent Image. The external state of the parent Image
97 must be accessible from the Image's context, or duplicated within the
98 Image (IMG_PARTIAL for zones, for instance).
100 The parent of a user Image can be a partial Image. The parent of a
101 partial Image must be an entire Image.
103 An Image of type IMG_USER stores its external state at self.root +
104 ".org.opensolaris,pkg".
106 An Image of type IMG_ENTIRE or IMG_PARTIAL stores its external state at
107 self.root + "/var/pkg".
109 An Image needs to be able to have a different repository set than the
112 For image format details, see section 5.3 of doc/on-disk-format.txt
118 IMG_CATALOG_KNOWN
= "known"
119 IMG_CATALOG_INSTALLED
= "installed"
121 __STATE_UPDATING_FILE
= "state_updating"
123 def __init__(self
, root
, user_provided_dir
=False, progtrack
=None,
124 should_exist
=True, imgtype
=None, force
=False,
125 augment_ta_from_parent_image
=True, allow_ondisk_upgrade
=None,
126 props
=misc
.EmptyDict
, cmdpath
=None):
129 assert(imgtype
is None)
132 assert(imgtype
is not None)
134 # Alternate package sources.
135 self
.__alt
_pkg
_pub
_map
= None
136 self
.__alt
_pubs
= None
137 self
.__alt
_known
_cat
= None
138 self
.__alt
_pkg
_sources
_loaded
= False
140 # Determine identity of client executable if appropriate.
142 cmdpath
= misc
.api_cmdpath()
143 self
.cmdpath
= cmdpath
145 if self
.cmdpath
!= None:
146 self
.__cmddir
= os
.path
.dirname(cmdpath
)
148 # prevent brokenness in the test suite
149 if self
.cmdpath
and \
150 "PKG_NO_RUNPY_CMDPATH" in os
.environ
and \
151 self
.cmdpath
.endswith(os
.sep
+ "run.py"):
152 raise RuntimeError("""
153 An Image object was allocated from within ipkg test suite and
154 cmdpath was not explicitly overridden. Please make sure to
155 explicitly set cmdpath when allocating an Image object, or
156 override cmdpath when allocating an Image object by setting PKG_CMDPATH
157 in the environment or by setting simulate_cmdpath in DebugValues.""")
161 # Indicates whether automatic image format upgrades of the
162 # on-disk format are allowed.
163 self
.allow_ondisk_upgrade
= allow_ondisk_upgrade
164 self
.__upgraded
= False
166 # Must happen after upgraded assignment.
167 self
.__init
_catalogs
()
172 self
.blocking_locks
= False
174 self
.history
= history
.History()
175 self
.imageplan
= None
176 self
.img_prefix
= None
177 self
.index_dir
= None
181 # Can have multiple read cache dirs...
182 self
.__read
_cache
_dirs
= []
184 # ...but only one global write cache dir and incoming write dir.
185 self
.__write
_cache
_dir
= None
186 self
.__user
_cache
_dir
= None
187 self
._incoming
_cache
_dir
= None
189 # Set if write_cache is actually a tree like /var/pkg/publisher
190 # instead of a flat cache.
191 self
.__write
_cache
_root
= None
193 self
.__lock
= pkg
.nrlock
.NRLock()
194 self
.__lockfile
= None
195 self
.__sig
_policy
= None
196 self
.__trust
_anchors
= None
197 self
.__bad
_trust
_anchors
= []
199 # cache for presence of boot-archive
200 self
.__boot
_archive
= None
202 # When users and groups are added before their database files
203 # have been installed, the actions store them temporarily in the
204 # image, in these members.
207 self
._usersbyname
= {}
208 self
._groupsbyname
= {}
210 # Set of pkg stems being avoided
211 self
.__avoid
_set
= None
212 self
.__avoid
_set
_altered
= False
214 # set of pkg stems subject to group
215 # dependency but removed because obsolete
216 self
.__group
_obsolete
= None
218 # The action dictionary that's returned by __load_actdict.
219 self
.__actdict
= None
220 self
.__actdict
_timestamp
= None
222 self
.__property
_overrides
= { "property": props
}
224 # Transport operations for this image
225 self
.transport
= transport
.Transport(
226 transport
.ImageTransportCfg(self
))
228 self
.linked
= li
.LinkedImage(self
)
231 self
.find_root(self
.root
, user_provided_dir
,
234 if not force
and self
.image_type(self
.root
) != None:
235 raise apx
.ImageAlreadyExists(self
.root
)
236 if not force
and os
.path
.exists(self
.root
):
237 # ignore .zfs snapdir if it's present
238 snapdir
= os
.path
.join(self
.root
, ".zfs")
239 listdir
= set(os
.listdir(self
.root
))
240 if os
.path
.isdir(snapdir
):
241 listdir
-= set([".zfs"])
243 raise apx
.CreatingImageInNonEmptyDir(
245 self
.__set
_dirs
(root
=self
.root
, imgtype
=imgtype
,
246 progtrack
=progtrack
, purge
=True)
248 # right now we don't explicitly set dir/file modes everywhere;
249 # set umask to proper value to prevent problems w/ overly
253 self
.augment_ta_from_parent_image
= augment_ta_from_parent_image
def __catalog_loaded(self, name):
    """Return True if the catalog *name* is already cached in memory.

    Intended as a cheap optimization check for callers deciding which
    catalog to request.
    """
    return name in self.__catalogs
262 def __init_catalogs(self
):
263 """Initializes default catalog state. Actual data is provided
264 on demand via get_catalog()"""
266 if self
.__upgraded
and self
.version
< 3:
267 # Ignore request; transformed catalog data only exists
268 # in memory and can't be reloaded from disk.
271 # This is used to cache image catalogs.
273 self
.__alt
_pkg
_sources
_loaded
= False
def alloc(*args, **kwargs):
    """Alternate constructor: build and return a new Image.

    All positional and keyword arguments are forwarded unchanged to
    Image().
    """
    return Image(*args, **kwargs)
281 """The absolute path of the image's metadata."""
286 """A boolean value indicating whether the image is currently
289 return self
.__lock
and self
.__lock
.locked
293 """The absolute path of the image's location."""
def signature_policy(self):
    """The current signature policy for this image (lazily built
    from image configuration and cached)."""

    if self.__sig_policy is None:
        # Build the policy once from the image configuration and
        # cache it for all subsequent lookups.
        policy_text = self.cfg.get_policy_str(
            imageconfig.SIGNATURE_POLICY)
        required_names = self.cfg.get_property("property",
            "signature-required-names")
        self.__sig_policy = sigpolicy.Policy.policy_factory(
            policy_text, required_names)
    return self.__sig_policy
309 def trust_anchors(self
):
310 """A dictionary mapping subject hashes for certificates this
311 image trusts to those certs. The image trusts the trust anchors
312 in its trust_anchor_dir and those in the image from which the
315 if self
.__trust
_anchors
is not None:
316 return self
.__trust
_anchors
318 user_set_ta_loc
= True
319 rel_dir
= self
.get_property("trust-anchor-directory")
320 if rel_dir
[0] == "/":
321 rel_dir
= rel_dir
[1:]
322 trust_anchor_loc
= os
.path
.join(self
.root
, rel_dir
)
323 loc_is_dir
= os
.path
.isdir(trust_anchor_loc
)
324 pkg_trust_anchors
= {}
325 if self
.__cmddir
and self
.augment_ta_from_parent_image
:
326 pkg_trust_anchors
= Image(self
.__cmddir
,
327 augment_ta_from_parent_image
=False,
328 cmdpath
=self
.cmdpath
).trust_anchors
329 if not loc_is_dir
and os
.path
.exists(trust_anchor_loc
):
330 raise apx
.InvalidPropertyValue(_("The trust "
331 "anchors for the image were expected to be found "
332 "in {0}, but that is not a directory. Please set "
333 "the image property 'trust-anchor-directory' to "
334 "the correct path.").format(trust_anchor_loc
))
335 self
.__trust
_anchors
= {}
337 for fn
in os
.listdir(trust_anchor_loc
):
338 pth
= os
.path
.join(trust_anchor_loc
, fn
)
339 if os
.path
.islink(pth
):
342 with
open(pth
, "rb") as f
:
345 x509
.load_pem_x509_certificate(
346 raw
, default_backend())
347 except (ValueError, IOError) as e
:
348 self
.__bad
_trust
_anchors
.append(
351 # We store certificates internally by
352 # the SHA-1 hash of its subject.
353 s
= hashlib
.sha1(misc
.force_bytes(
354 trusted_ca
.subject
)).hexdigest()
355 self
.__trust
_anchors
.setdefault(s
, [])
356 self
.__trust
_anchors
[s
].append(
358 for s
in pkg_trust_anchors
:
359 if s
not in self
.__trust
_anchors
:
360 self
.__trust
_anchors
[s
] = pkg_trust_anchors
[s
]
361 return self
.__trust
_anchors
def bad_trust_anchors(self):
    """A list of strings describing errors encountered while parsing
    trust anchors.

    Each entry reports the path of a certificate file and the parse
    error recorded for it in self.__bad_trust_anchors (pairs of
    (path, error) -- see the visible `for p, e in` unpacking).
    """
    # Fixed: "decribing" typo in the docstring, and restored the
    # list-comprehension closing bracket that was lost in this copy.
    return [_("{path} is expected to be a certificate but could "
        "not be parsed. The error encountered "
        "was:\n\t{err}").format(path=p, err=e)
        for p, e in self.__bad_trust_anchors]
def write_cache_path(self):
    """The path to the filesystem that holds the write cache -- used
    to compute whether sufficient space is available.

    Returns the user-specified cache directory when one was set,
    otherwise the image's publisher directory under imgdir.
    """
    # Fixed: "sufficent" typo in the docstring; code unchanged.
    return self.__user_cache_dir or \
        os.path.join(self.imgdir, IMG_PUB_DIR)
384 def locked_op(self
, op
, allow_unprivileged
=False, new_history_op
=True):
385 """Helper method for executing an image-modifying operation
386 that needs locking. It automatically handles calling
387 log_operation_start and log_operation_end by default. Locking
388 behaviour is controlled by the blocking_locks image property.
390 'allow_unprivileged' is an optional boolean value indicating
391 that permissions-related exceptions should be ignored when
392 attempting to obtain the lock as the related operation will
393 still work correctly even though the image cannot (presumably)
396 'new_history_op' indicates whether we should handle history
401 self
.lock(allow_unprivileged
=allow_unprivileged
)
404 bootenv
.BootEnv
.get_be_name(self
.root
)
406 self
.history
.log_operation_start(op
,
407 be_name
=be_name
, be_uuid
=be_uuid
)
409 except apx
.ImageLockedError
as e
:
410 # Don't unlock the image if the call failed to
414 except Exception as e
:
422 self
.history
.log_operation_end(error
=error
)
424 def lock(self
, allow_unprivileged
=False):
425 """Locks the image in preparation for an image-modifying
426 operation. Raises an ImageLockedError exception on failure.
427 Locking behaviour is controlled by the blocking_locks image
430 'allow_unprivileged' is an optional boolean value indicating
431 that permissions-related exceptions should be ignored when
432 attempting to obtain the lock as the related operation will
433 still work correctly even though the image cannot (presumably)
437 blocking
= self
.blocking_locks
439 # First, attempt to obtain a thread lock.
440 if not self
.__lock
.acquire(blocking
=blocking
):
441 raise apx
.ImageLockedError()
444 # Attempt to obtain a file lock.
445 self
.__lockfile
.lock(blocking
=blocking
)
446 except EnvironmentError as e
:
448 if e
.errno
== errno
.ENOENT
:
450 if e
.errno
== errno
.EACCES
:
451 exc
= apx
.UnprivilegedUserError(e
.filename
)
452 elif e
.errno
== errno
.EROFS
:
453 exc
= apx
.ReadOnlyFileSystemException(
456 self
.__lock
.release()
459 if exc
and not allow_unprivileged
:
460 self
.__lock
.release()
463 # If process lock fails, ensure thread lock is released.
464 self
.__lock
.release()
468 """Unlocks the image."""
472 self
.__lockfile
.unlock()
474 self
.__lock
.release()
476 def image_type(self
, d
):
477 """Returns the type of image at directory: d; or None"""
480 def is_image(sub_d
, prefix
):
481 # First check for new image configuration file.
482 if os
.path
.isfile(os
.path
.join(sub_d
, prefix
,
484 # Regardless of directory structure, assume
485 # this is an image for now.
488 if not os
.path
.isfile(os
.path
.join(sub_d
, prefix
,
490 # For older formats, if configuration is
491 # missing, this can't be an image.
494 # Configuration exists, but for older formats,
495 # all of these directories have to exist.
496 for n
in ("state", "pkg"):
497 if not os
.path
.isdir(os
.path
.join(sub_d
, prefix
,
503 if os
.path
.isdir(os
.path
.join(d
, img_user_prefix
)) and \
504 is_image(d
, img_user_prefix
):
506 elif os
.path
.isdir(os
.path
.join(d
, img_root_prefix
)) and \
507 is_image(d
, img_root_prefix
):
511 def find_root(self
, d
, exact_match
=False, progtrack
=None):
512 # Ascend from the given directory d to find first
513 # encountered image. If exact_match is true, if the
514 # image found doesn't match startd, raise an
515 # ImageNotFoundException.
518 # eliminate problem if relative path such as "." is passed in
519 d
= os
.path
.realpath(d
)
522 imgtype
= self
.image_type(d
)
523 if imgtype
in (IMG_USER
, IMG_ENTIRE
):
525 os
.path
.realpath(startd
) != \
527 raise apx
.ImageNotFoundException(
528 exact_match
, startd
, d
)
529 self
.__set
_dirs
(imgtype
=imgtype
, root
=d
,
530 startd
=startd
, progtrack
=progtrack
)
533 # XXX follow symlinks or not?
535 d
= os
.path
.normpath(os
.path
.join(d
, os
.path
.pardir
))
537 # Make sure we are making progress and aren't in an
540 # (XXX - Need to deal with symlinks here too)
542 raise apx
.ImageNotFoundException(
543 exact_match
, startd
, d
)
545 def __load_config(self
):
546 """Load this image's cached configuration from the default
547 location. This function should not be called anywhere other
548 than __set_dirs()."""
550 # XXX Incomplete with respect to doc/image.txt description of
553 if self
.root
== None:
554 raise RuntimeError("self.root must be set")
557 if self
.version
> -1:
558 if self
.version
>= 3:
559 # Configuration version is currently 3
560 # for all v3 images and newer.
563 version
= self
.version
565 self
.cfg
= imageconfig
.ImageConfig(self
.__cfgpathname
,
566 self
.root
, version
=version
,
567 overrides
=self
.__property
_overrides
)
570 self
.cfg
= imageconfig
.BlendedConfig(self
.cfg
,
571 self
.get_catalog(self
.IMG_CATALOG_INSTALLED
).\
572 get_package_counts_by_pub(),
573 self
.imgdir
, self
.transport
,
574 self
.cfg
.get_policy("use-system-repo"))
576 self
.__load
_publisher
_ssl
()
578 def __store_publisher_ssl(self
):
579 """Normalizes publisher SSL configuration data, storing any
580 certificate files as needed in the image's SSL directory. This
581 logic is performed here in the image instead of ImageConfig as
582 it relies on special knowledge of the image structure."""
584 ssl_dir
= os
.path
.join(self
.imgdir
, "ssl")
586 def store_ssl_file(src
):
588 if not src
or not os
.path
.exists(src
):
589 # If SSL file doesn't exist (for
590 # whatever reason), then don't update
591 # configuration. (Let the failure
592 # happen later during an operation
593 # that requires the file.)
595 except EnvironmentError as e
:
596 raise apx
._convert
_error
(e
)
598 # Ensure ssl_dir exists; makedirs handles any errors.
599 misc
.makedirs(ssl_dir
)
602 # Destination name is based on digest of file.
603 # In order for this image to interoperate with
604 # older and newer clients, we must use sha-1
606 dest
= os
.path
.join(ssl_dir
,
607 misc
.get_data_digest(src
,
608 hash_func
=hashlib
.sha1
)[0])
610 portable
.copyfile(src
, dest
)
612 # Ensure file can be read by unprivileged users.
613 os
.chmod(dest
, misc
.PKG_FILE_MODE
)
614 except EnvironmentError as e
:
615 raise apx
._convert
_error
(e
)
618 for pub
in self
.cfg
.publishers
.values():
619 # self.cfg.publishers is used because gen_publishers
620 # includes temporary publishers and this is only for
622 repo
= pub
.repository
626 # Store and normalize ssl_cert and ssl_key.
627 for u
in repo
.origins
+ repo
.mirrors
:
628 for prop
in ("ssl_cert", "ssl_key"):
629 pval
= getattr(u
, prop
)
631 pval
= store_ssl_file(pval
)
634 # Store path as absolute to image root,
635 # it will be corrected on load to match
636 # actual image location if needed.
638 os
.path
.splitdrive(self
.root
)[0] +
640 misc
.relpath(pval
, start
=self
.root
))
642 def __load_publisher_ssl(self
):
643 """Should be called every time image configuration is loaded;
644 ensure ssl_cert and ssl_key properties of publisher repository
645 URI objects match current image location."""
647 ssl_dir
= os
.path
.join(self
.imgdir
, "ssl")
649 for pub
in self
.cfg
.publishers
.values():
650 # self.cfg.publishers is used because gen_publishers
651 # includes temporary publishers and this is only for
653 repo
= pub
.repository
657 for u
in repo
.origins
+ repo
.mirrors
:
658 for prop
in ("ssl_cert", "ssl_key"):
659 pval
= getattr(u
, prop
)
662 if not os
.path
.join(self
.img_prefix
,
663 "ssl") in os
.path
.dirname(pval
):
665 # If special image directory is part
666 # of path, then assume path should be
667 # rewritten to match current image
669 setattr(u
, prop
, os
.path
.join(ssl_dir
,
670 os
.path
.basename(pval
)))
672 def save_config(self
):
673 # First, create the image directories if they haven't been, so
674 # the configuration file can be written.
677 self
.__store
_publisher
_ssl
()
679 self
.__load
_publisher
_ssl
()
681 # Remove the old the pkg.sysrepo(1M) cache, if present.
682 cache_path
= os
.path
.join(self
.root
,
683 global_settings
.sysrepo_pub_cache_path
)
685 portable
.remove(cache_path
)
686 except EnvironmentError as e
:
687 if e
.errno
!= errno
.ENOENT
:
688 raise apx
._convert
_error
(e
)
690 if self
.is_liveroot() and \
692 "svc:/application/pkg/system-repository:default") in \
693 (smf
.SMF_SVC_TMP_ENABLED
, smf
.SMF_SVC_ENABLED
):
695 "svc:/application/pkg/system-repository:default"])
697 # This ensures all old transport configuration is thrown away.
698 self
.transport
= transport
.Transport(
699 transport
.ImageTransportCfg(self
))
701 def mkdirs(self
, root
=None, version
=None):
702 """Create any missing parts of the image's directory structure.
704 'root' is an optional path to a directory to create the new
705 image structure in. If not provided, the current image
706 directory is the default.
708 'version' is an optional integer value indicating the version
709 of the structure to create. If not provided, the current image
710 version is the default.
716 version
= self
.version
718 if version
== self
.CURRENT_VERSION
:
719 img_dirs
= ["cache/index", "cache/publisher",
720 "cache/tmp", "gui_cache", "history", "license",
721 "lost+found", "publisher", "ssl", "state/installed",
724 img_dirs
= ["download", "file", "gui_cache", "history",
725 "index", "lost+found", "pkg", "publisher",
726 "state/installed", "state/known", "tmp"]
730 misc
.makedirs(os
.path
.join(root
, sd
))
731 except EnvironmentError as e
:
732 raise apx
._convert
_error
(e
)
734 def __set_dirs(self
, imgtype
, root
, startd
=None, progtrack
=None,
736 # Ensure upgraded status is reset.
737 self
.__upgraded
= False
739 if not self
.__allow
_liveroot
() and root
== misc
.liveroot():
743 "Live root image access is disabled but was \
744 attempted.\nliveroot: {0}\nimage path: {1}".format(
745 misc
.liveroot(), startd
))
749 if self
.type == IMG_USER
:
750 self
.img_prefix
= img_user_prefix
752 self
.img_prefix
= img_root_prefix
754 # Use a new Transport object every time location is changed.
755 self
.transport
= transport
.Transport(
756 transport
.ImageTransportCfg(self
))
758 # cleanup specified path
759 if os
.path
.isdir(root
):
762 except Exception as e
:
763 # If current directory can't be obtained for any
764 # reason, ignore the error.
769 self
.__root
= os
.getcwd()
770 except EnvironmentError as e
:
771 raise apx
._convert
_error
(e
)
776 # If current image is locked, then it should be unlocked
777 # and then relocked after the imgdir is changed. This
778 # ensures that alternate BE scenarios work.
779 relock
= self
.imgdir
and self
.locked
783 # Must set imgdir first.
784 self
.__imgdir
= os
.path
.join(self
.root
, self
.img_prefix
)
786 # Force a reset of version.
789 # Assume version 4+ configuration location.
790 self
.__cfgpathname
= os
.path
.join(self
.imgdir
, "pkg5.image")
792 # In the case of initial image creation, purge is specified
793 # to ensure that when an image is created over an existing
794 # one, any old data is removed first.
795 if purge
and os
.path
.exists(self
.imgdir
):
796 for entry
in os
.listdir(self
.imgdir
):
798 # Preserve certs and keys directory
799 # as a special exception.
801 epath
= os
.path
.join(self
.imgdir
, entry
)
803 if os
.path
.isdir(epath
):
806 portable
.remove(epath
)
807 except EnvironmentError as e
:
808 raise apx
._convert
_error
(e
)
810 # Determine if the version 4 configuration file exists.
811 if not os
.path
.exists(self
.__cfgpathname
):
812 self
.__cfgpathname
= os
.path
.join(self
.imgdir
,
815 # Load the image configuration.
820 self
.version
= int(self
.cfg
.get_property("image",
822 except (cfg
.PropertyConfigError
, ValueError):
823 # If version couldn't be read from
824 # configuration, then allow fallback
825 # path below to set things right.
828 if self
.version
<= 0:
829 # If version doesn't exist, attempt to determine version
830 # based on structure.
831 pub_root
= os
.path
.join(self
.imgdir
, IMG_PUB_DIR
)
833 # This is a new image.
834 self
.version
= self
.CURRENT_VERSION
835 elif os
.path
.exists(pub_root
):
836 cache_root
= os
.path
.join(self
.imgdir
, "cache")
837 if os
.path
.exists(cache_root
):
838 # The image must be corrupted, as the
839 # version should have been loaded from
840 # configuration. For now, raise an
841 # exception. In the future, this
842 # behaviour should probably be optional
843 # so that pkg fix or pkg verify can
844 # still use the image.
845 raise apx
.UnsupportedImageError(
848 # Assume version 3 image.
851 # Reload image configuration again now that
852 # version has been determined so that property
855 elif os
.path
.exists(os
.path
.join(self
.imgdir
,
859 # Reload image configuration again now that
860 # version has been determined so that property
864 # Format is too old or invalid.
865 raise apx
.UnsupportedImageError(self
.root
)
867 if self
.version
> self
.CURRENT_VERSION
or self
.version
< 2:
868 # Image is too new or too old.
869 raise apx
.UnsupportedImageError(self
.root
)
871 # Ensure image version matches determined one; this must
872 # be set *after* the version checks above.
873 self
.cfg
.set_property("image", "version", self
.version
)
875 # Remaining dirs may now be set.
876 if self
.version
== self
.CURRENT_VERSION
:
877 self
.__tmpdir
= os
.path
.join(self
.imgdir
, "cache",
880 self
.__tmpdir
= os
.path
.join(self
.imgdir
, "tmp")
881 self
._statedir
= os
.path
.join(self
.imgdir
, "state")
882 self
.plandir
= os
.path
.join(self
.__tmpdir
, "plan")
883 self
.update_index_dir()
885 self
.history
.root_dir
= self
.imgdir
886 self
.__lockfile
= lockfile
.LockFile(os
.path
.join(self
.imgdir
,
887 "lock"), set_lockstr
=lockfile
.client_lock_set_str
,
888 get_lockstr
=lockfile
.client_lock_get_str
,
889 failure_exc
=apx
.ImageLockedError
,
895 # Setup cache directories.
896 self
.__read
_cache
_dirs
= []
897 self
._incoming
_cache
_dir
= None
898 self
.__user
_cache
_dir
= None
899 self
.__write
_cache
_dir
= None
900 self
.__write
_cache
_root
= None
901 # The user specified cache is used as an additional place to
902 # read cache data from, but as the only place to store new
904 if "PKG_CACHEROOT" in os
.environ
:
905 # If set, cache is structured like /var/pkg/publisher.
906 # get_cachedirs() will build paths for each publisher's
907 # cache using this directory.
908 self
.__user
_cache
_dir
= os
.path
.normpath(
909 os
.environ
["PKG_CACHEROOT"])
910 self
.__write
_cache
_root
= self
.__user
_cache
_dir
911 elif "PKG_CACHEDIR" in os
.environ
:
912 # If set, cache is a flat structure that is used for
914 self
.__user
_cache
_dir
= os
.path
.normpath(
915 os
.environ
["PKG_CACHEDIR"])
916 self
.__write
_cache
_dir
= self
.__user
_cache
_dir
917 # Since the cache structure is flat, add it to the
918 # list of global read caches.
919 self
.__read
_cache
_dirs
.append(self
.__user
_cache
_dir
)
920 if self
.__user
_cache
_dir
:
921 self
._incoming
_cache
_dir
= os
.path
.join(
922 self
.__user
_cache
_dir
,
923 "incoming-{0:d}".format(os
.getpid()))
926 self
.__action
_cache
_dir
= self
.temporary_dir()
928 self
.__action
_cache
_dir
= os
.path
.join(self
.imgdir
,
932 if not self
.__user
_cache
_dir
:
933 self
.__write
_cache
_dir
= os
.path
.join(
934 self
.imgdir
, "download")
935 self
._incoming
_cache
_dir
= os
.path
.join(
936 self
.__write
_cache
_dir
,
937 "incoming-{0:d}".format(os
.getpid()))
938 self
.__read
_cache
_dirs
.append(os
.path
.normpath(
939 os
.path
.join(self
.imgdir
, "download")))
940 elif not self
._incoming
_cache
_dir
:
941 # Only a global incoming cache exists for newer images.
942 self
._incoming
_cache
_dir
= os
.path
.join(self
.imgdir
,
943 "cache", "incoming-{0:d}".format(os
.getpid()))
945 # Test if we have the permissions to create the cache
946 # incoming directory in this hierarchy. If not, we'll need to
947 # move it somewhere else.
949 os
.makedirs(self
._incoming
_cache
_dir
)
950 except EnvironmentError as e
:
951 if e
.errno
== errno
.EACCES
or e
.errno
== errno
.EROFS
:
952 self
.__write
_cache
_dir
= tempfile
.mkdtemp(
953 prefix
="download-{0:d}-".format(
955 self
._incoming
_cache
_dir
= os
.path
.normpath(
956 os
.path
.join(self
.__write
_cache
_dir
,
957 "incoming-{0:d}".format(os
.getpid())))
958 self
.__read
_cache
_dirs
.append(
959 self
.__write
_cache
_dir
)
960 # There's no image cleanup hook, so we'll just
961 # remove this directory on process exit.
962 atexit
.register(shutil
.rmtree
,
963 self
.__write
_cache
_dir
, ignore_errors
=True)
965 os
.removedirs(self
._incoming
_cache
_dir
)
967 # Forcibly discard image catalogs so they can be re-loaded
968 # from the new location if they are already loaded. This
969 # also prevents scribbling on image state information in
970 # the wrong location.
971 self
.__init
_catalogs
()
973 # Upgrade the image's format if needed.
974 self
.update_format(allow_unprivileged
=True,
977 # If we haven't loaded the system publisher configuration, do
979 if isinstance(self
.cfg
, imageconfig
.ImageConfig
):
980 self
.cfg
= imageconfig
.BlendedConfig(self
.cfg
,
981 self
.get_catalog(self
.IMG_CATALOG_INSTALLED
).\
982 get_package_counts_by_pub(),
983 self
.imgdir
, self
.transport
,
984 self
.cfg
.get_policy("use-system-repo"))
986 # Check to see if any system publishers have been changed.
987 # If so they need to be refreshed, so clear last_refreshed.
988 for p
in self
.cfg
.modified_pubs
:
989 p
.meta_root
= self
._get
_publisher
_meta
_root
(p
.prefix
)
990 p
.last_refreshed
= None
992 # Check to see if any system publishers have been
993 # removed. If they have, remove their metadata and
994 # rebuild the catalogs.
996 for p
in self
.cfg
.removed_pubs
:
997 p
.meta_root
= self
._get
_publisher
_meta
_root
(p
.prefix
)
999 self
.remove_publisher_metadata(p
, rebuild
=False)
1001 except apx
.PermissionsException
:
1004 self
.__rebuild
_image
_catalogs
()
1006 # we delay writing out any new system repository configuration
1007 # until we've updated on on-disk catalog state. (otherwise we
1008 # could lose track of syspub publishers changes and either
1009 # return stale catalog information, or not do refreshes when
1011 self
.cfg
.write_sys_cfg()
1013 self
.__load
_publisher
_ssl
()
1015 # Configuration shouldn't be written again unless this
1016 # is an image creation operation (hence the purge).
1019 # Let the linked image subsystem know that root is moving
1020 self
.linked
._init
_root
()
1022 # load image avoid pkg set
1023 self
.__avoid
_set
_load
()
1025 def update_format(self
, allow_unprivileged
=False, progtrack
=None):
1026 """Transform the existing image structure and its data to
1027 the newest format. Callers are responsible for locking.
1029 'allow_unprivileged' is an optional boolean indicating
1030 whether a fallback to an in-memory only upgrade should
1031 be performed if a PermissionsException is encountered
1032 during the operation.
1034 'progtrack' is an optional ProgressTracker object.
1037 if self
.version
== self
.CURRENT_VERSION
:
1039 self
.__upgraded
= True
1041 # If pre-upgrade data still exists; fire off a
1042 # process to dump it so execution can continue.
1043 orig_root
= self
.imgdir
+ ".old"
1044 nullf
= open(os
.devnull
, "w")
1045 if os
.path
.exists(orig_root
):
1046 # Ensure all output is discarded; it really
1047 # doesn't matter if this succeeds.
1048 subprocess
.Popen("rm -rf {0}".format(orig_root
),
1049 shell
=True, stdout
=nullf
, stderr
=nullf
)
1053 progtrack
= progress
.NullProgressTracker()
1055 # Not technically 'caching', but close enough ...
1056 progtrack
.cache_catalogs_start()
1058 # Upgrade catalog data if needed.
1059 self
.__upgrade
_catalogs
()
1061 # Data conversion finished.
1062 self
.__upgraded
= True
1064 # Determine if on-disk portion of the upgrade is allowed.
1065 if self
.allow_ondisk_upgrade
== False:
1068 if self
.allow_ondisk_upgrade
is None and self
.type != IMG_USER
:
1069 if not self
.is_liveroot() and not self
.is_zone():
1070 # By default, don't update image format if it
1071 # is not the live root, and is not for a zone.
1072 self
.allow_ondisk_upgrade
= False
1075 # The logic to perform the on-disk upgrade is in its own
1076 # function so that it can easily be wrapped with locking logic.
1077 with self
.locked_op("update-format",
1078 allow_unprivileged
=allow_unprivileged
):
1079 self
.__upgrade
_image
_format
(progtrack
,
1080 allow_unprivileged
=allow_unprivileged
)
1082 progtrack
.cache_catalogs_done()
1085 def __upgrade_catalogs(self
):
1086 """Private helper function for update_format."""
1088 if self
.version
>= 3:
1092 def installed_file_publisher(filepath
):
1093 """Find the pkg's installed file named by filepath.
1094 Return the publisher that installed this package."""
1098 flines
= f
.readlines()
1099 version
, pub
= flines
1100 version
= version
.strip()
1104 # If ValueError occurs, the installed file is of
1105 # a previous format. For upgrades to work, it's
1106 # necessary to assume that the package was
1107 # installed from the highest ranked publisher.
1108 # Here, the publisher is setup to record that.
1112 newpub
= "{0}_{1}".format(
1113 pkg
.fmri
.PREF_PUB_PFX
, pub
)
1115 newpub
= "{0}_{1}".format(
1116 pkg
.fmri
.PREF_PUB_PFX
,
1117 self
.get_highest_ranked_publisher())
1122 # First, load the old package state information.
1123 installed_state_dir
= "{0}/state/installed".format(self
.imgdir
)
1125 # If the state directory structure has already been created,
1126 # loading information from it is fast. The directory is
1127 # populated with files, named by their (url-encoded) FMRI,
1128 # which point to the "installed" file in the corresponding
1129 # directory under /var/pkg.
1131 def add_installed_entry(f
):
1132 path
= "{0}/pkg/{1}/installed".format(
1133 self
.imgdir
, f
.get_dir_path())
1134 pub
= installed_file_publisher(path
)
1135 f
.set_publisher(pub
)
1136 installed
[f
.pkg_name
] = f
1138 for pl
in os
.listdir(installed_state_dir
):
1139 fmristr
= "{0}".format(urllib
.unquote(pl
))
1140 f
= pkg
.fmri
.PkgFmri(fmristr
)
1141 add_installed_entry(f
)
1143 # Create the new image catalogs.
1144 kcat
= pkg
.catalog
.Catalog(batch_mode
=True,
1145 manifest_cb
=self
._manifest
_cb
, sign
=False)
1146 icat
= pkg
.catalog
.Catalog(batch_mode
=True,
1147 manifest_cb
=self
._manifest
_cb
, sign
=False)
1149 # XXX For backwards compatibility, 'upgradability' of packages
1150 # is calculated and stored based on whether a given pkg stem
1151 # matches the newest version in the catalog. This is quite
1152 # expensive (due to overhead), but at least the cost is
1153 # consolidated here. This comparison is also cross-publisher,
1157 for pub
in self
.gen_publishers():
1159 old_cat
= pkg
.server
.catalog
.ServerCatalog(
1160 pub
.meta_root
, read_only
=True,
1161 publisher
=pub
.prefix
)
1163 old_pub_cats
.append((pub
, old_cat
))
1164 for f
in old_cat
.fmris():
1165 nver
= newest
.get(f
.pkg_name
, None)
1166 newest
[f
.pkg_name
] = max(nver
,
1169 except EnvironmentError as e
:
1170 # If a catalog file is just missing, ignore it.
1171 # If there's a worse error, make sure the user
1173 if e
.errno
!= errno
.ENOENT
:
1176 # Next, load the existing catalog data and convert it.
1178 for pub
, old_cat
in old_pub_cats
:
1179 new_cat
= pub
.catalog
1180 new_cat
.batch_mode
= True
1181 new_cat
.sign
= False
1185 # First convert the old publisher catalog to
1187 for f
in old_cat
.fmris():
1188 new_cat
.add_package(f
)
1190 # Now populate the image catalogs.
1191 states
= [pkgdefs
.PKG_STATE_KNOWN
,
1192 pkgdefs
.PKG_STATE_V0
]
1193 mdata
= { "states": states
}
1194 if f
.version
!= newest
[f
.pkg_name
]:
1196 pkgdefs
.PKG_STATE_UPGRADABLE
)
1198 inst_fmri
= installed
.get(f
.pkg_name
, None)
1200 inst_fmri
.version
== f
.version
and \
1201 pkg
.fmri
.is_same_publisher(f
.publisher
,
1202 inst_fmri
.publisher
):
1204 pkgdefs
.PKG_STATE_INSTALLED
)
1205 if inst_fmri
.preferred_publisher():
1206 # Strip the PREF_PUB_PFX.
1207 inst_fmri
.set_publisher(
1208 inst_fmri
.get_publisher())
1209 icat
.add_package(f
, metadata
=mdata
)
1210 del installed
[f
.pkg_name
]
1211 kcat
.add_package(f
, metadata
=mdata
)
1213 # Normally, the Catalog's attributes are automatically
1214 # populated as a result of catalog operations. But in
1215 # this case, the new Catalog's attributes should match
1216 # those of the old catalog.
1217 old_lm
= old_cat
.last_modified()
1219 # Can be None for empty v0 catalogs.
1220 old_lm
= pkg
.catalog
.ts_to_datetime(old_lm
)
1221 new_cat
.last_modified
= old_lm
1224 # Add to the list of catalogs to save.
1225 new_cat
.batch_mode
= False
1226 pub_cats
.append(new_cat
)
1228 # Discard the old catalog objects.
1231 for f
in installed
.values():
1232 # Any remaining FMRIs need to be added to all of the
1234 states
= [pkgdefs
.PKG_STATE_INSTALLED
,
1235 pkgdefs
.PKG_STATE_V0
]
1236 mdata
= { "states": states
}
1237 # This package may be installed from a publisher that
1238 # is no longer known or has been disabled.
1239 if f
.pkg_name
in newest
and \
1240 f
.version
!= newest
[f
.pkg_name
]:
1241 states
.append(pkgdefs
.PKG_STATE_UPGRADABLE
)
1243 if f
.preferred_publisher():
1244 # Strip the PREF_PUB_PFX.
1245 f
.set_publisher(f
.get_publisher())
1247 icat
.add_package(f
, metadata
=mdata
)
1248 kcat
.add_package(f
, metadata
=mdata
)
1250 for cat
in pub_cats
+ [kcat
, icat
]:
1253 # Cache converted catalogs so that operations can function as
1254 # expected if the on-disk format of the catalogs isn't upgraded.
1255 self
.__catalogs
[self
.IMG_CATALOG_KNOWN
] = kcat
1256 self
.__catalogs
[self
.IMG_CATALOG_INSTALLED
] = icat
1258 def __upgrade_image_format(self
, progtrack
, allow_unprivileged
=False):
1259 """Private helper function for update_format."""
1262 # Ensure Image directory structure is valid.
1264 except apx
.PermissionsException
as e
:
1265 if not allow_unprivileged
:
1267 # An unprivileged user is attempting to use the
1268 # new client with an old image. Since none of
1269 # the changes can be saved, warn the user and
1272 # Raising an exception here would be a decidedly
1273 # bad thing as it would disrupt find_root, etc.
1276 # This has to be done after the permissions check above.
1277 # First, create a new temporary root to store the converted
1279 tmp_root
= self
.imgdir
+ ".new"
1281 shutil
.rmtree(tmp_root
)
1282 except EnvironmentError as e
:
1283 if e
.errno
in (errno
.EROFS
, errno
.EPERM
) and \
1287 if e
.errno
!= errno
.ENOENT
:
1288 raise apx
._convert
_error
(e
)
1291 self
.mkdirs(root
=tmp_root
, version
=self
.CURRENT_VERSION
)
1292 except apx
.PermissionsException
as e
:
1293 # Same handling needed as above; but not after this.
1294 if not allow_unprivileged
:
1298 def linktree(src_root
, dest_root
):
1299 if not os
.path
.exists(src_root
):
1303 for entry
in os
.listdir(src_root
):
1304 src
= os
.path
.join(src_root
, entry
)
1305 dest
= os
.path
.join(dest_root
, entry
)
1306 if os
.path
.isdir(src
):
1307 # Recurse into directory to link
1312 # Link source file into target dest.
1313 assert os
.path
.isfile(src
)
1316 except EnvironmentError as e
:
1317 raise apx
._convert
_error
(e
)
1319 # Next, link history data into place.
1320 linktree(self
.history
.path
, os
.path
.join(tmp_root
,
1323 # Next, link index data into place.
1324 linktree(self
.index_dir
, os
.path
.join(tmp_root
,
1327 # Next, link ssl data into place.
1328 linktree(os
.path
.join(self
.imgdir
, "ssl"),
1329 os
.path
.join(tmp_root
, "ssl"))
1331 # Next, write state data into place.
1332 if self
.version
< 3:
1333 # Image state and publisher metadata
1334 tmp_state_root
= os
.path
.join(tmp_root
, "state")
1336 # Update image catalog locations.
1337 kcat
= self
.get_catalog(self
.IMG_CATALOG_KNOWN
)
1338 icat
= self
.get_catalog(self
.IMG_CATALOG_INSTALLED
)
1339 kcat
.meta_root
= os
.path
.join(tmp_state_root
,
1340 self
.IMG_CATALOG_KNOWN
)
1341 icat
.meta_root
= os
.path
.join(tmp_state_root
,
1342 self
.IMG_CATALOG_INSTALLED
)
1344 # Assume that since mkdirs succeeded that the remaining
1345 # data can be saved and the image structure can be
1346 # upgraded. But first, attempt to save the image
1348 for cat
in icat
, kcat
:
1349 misc
.makedirs(cat
.meta_root
)
1352 # For version 3 and newer images, just link existing
1353 # state information into place.
1354 linktree(self
._statedir
, os
.path
.join(tmp_root
,
1357 # Reset each publisher's meta_root and ensure its complete
1358 # directory structure is intact. Then either link in or
1359 # write out the metadata for each publisher.
1360 for pub
in self
.gen_publishers():
1361 old_root
= pub
.meta_root
1362 old_cat_root
= pub
.catalog_root
1363 old_cert_root
= pub
.cert_root
1364 pub
.meta_root
= os
.path
.join(tmp_root
,
1365 IMG_PUB_DIR
, pub
.prefix
)
1366 pub
.create_meta_root()
1368 if self
.version
< 3:
1369 # Should be loaded in memory and transformed
1370 # already, so just need to be written out.
1374 # Now link any catalog or cert files from the old root
1375 # into the new root.
1376 linktree(old_cat_root
, pub
.catalog_root
)
1377 linktree(old_cert_root
, pub
.cert_root
)
1379 # Finally, create a directory for the publisher's
1380 # manifests to live in.
1381 misc
.makedirs(os
.path
.join(pub
.meta_root
, "pkg"))
1383 # Next, link licenses and manifests of installed packages into
1385 for pfmri
in self
.gen_installed_pkgs():
1387 mdir
= self
.get_manifest_dir(pfmri
)
1388 for entry
in os
.listdir(mdir
):
1389 if not entry
.startswith("license."):
1391 src
= os
.path
.join(mdir
, entry
)
1392 if os
.path
.isdir(src
):
1393 # Ignore broken licenses.
1396 # For conversion, ensure destination link uses
1397 # encoded license name to match how new image
1398 # format stores licenses.
1399 dest
= os
.path
.join(tmp_root
, "license",
1400 pfmri
.get_dir_path(stemonly
=True),
1401 urllib
.quote(entry
, ""))
1402 misc
.makedirs(os
.path
.dirname(dest
))
1405 except EnvironmentError as e
:
1406 raise apx
._convert
_error
(e
)
1409 src
= self
.get_manifest_path(pfmri
)
1410 dest
= os
.path
.join(tmp_root
, "publisher",
1411 pfmri
.publisher
, "pkg", pfmri
.get_dir_path())
1412 misc
.makedirs(os
.path
.dirname(dest
))
1415 except EnvironmentError as e
:
1416 raise apx
._convert
_error
(e
)
1418 # Next, copy the old configuration into the new location using
1419 # the new name. The configuration is copied instead of being
1420 # linked so that any changes to configuration as a result of
1421 # the upgrade won't be written into the old image directory.
1422 src
= os
.path
.join(self
.imgdir
, "disabled_auth")
1423 if os
.path
.exists(src
):
1424 dest
= os
.path
.join(tmp_root
, "disabled_auth")
1425 portable
.copyfile(src
, dest
)
1427 src
= self
.cfg
.target
1428 dest
= os
.path
.join(tmp_root
, "pkg5.image")
1430 portable
.copyfile(src
, dest
)
1431 except EnvironmentError as e
:
1432 raise apx
._convert
_error
(e
)
1434 # Update the new configuration's version information and then
1435 # write it out again.
1436 newcfg
= imageconfig
.ImageConfig(dest
, tmp_root
,
1437 version
=3, overrides
={ "image": {
1438 "version": self
.CURRENT_VERSION
} })
1442 # Now reload configuration and write again to configuration data
1443 # reflects updated version information.
1447 # Finally, rename the old package metadata directory, then
1448 # rename the new one into place, and then reinitialize. The
1449 # old data will be dumped during initialization.
1450 orig_root
= self
.imgdir
+ ".old"
1452 portable
.rename(self
.imgdir
, orig_root
)
1453 portable
.rename(tmp_root
, self
.imgdir
)
1455 # /var/pkg/repo is renamed into place instead of being
1456 # linked piece-by-piece for performance reasons.
1457 # Crawling the entire tree structure of a repository is
1458 # far slower than simply renaming the top level
1459 # directory (since it often has thousands or millions
1461 old_repo
= os
.path
.join(orig_root
, "repo")
1462 if os
.path
.exists(old_repo
):
1463 new_repo
= os
.path
.join(tmp_root
, "repo")
1464 portable
.rename(old_repo
, new_repo
)
1465 except EnvironmentError as e
:
1466 raise apx
._convert
_error
(e
)
1467 self
.find_root(self
.root
, exact_match
=True, progtrack
=progtrack
)
1469 def create(self
, pubs
, facets
=EmptyDict
, is_zone
=False, progtrack
=None,
1470 props
=EmptyDict
, refresh_allowed
=True, variants
=EmptyDict
):
1471 """Creates a new image with the given attributes if it does not
1472 exist; should not be used with an existing image.
1474 'is_zone' is a boolean indicating whether the image is a zone.
1476 'pubs' is a list of Publisher objects to configure the image
1479 'refresh_allowed' is an optional boolean indicating that
1480 network operations (such as publisher data retrieval) are
1483 'progtrack' is an optional ProgressTracker object.
1485 'props' is an option dictionary mapping image property names to
1488 'variants' is an optional dictionary of variant names and
1491 'facets' is an optional dictionary of facet names and values.
1495 p
.meta_root
= self
._get
_publisher
_meta
_root
(p
.prefix
)
1496 p
.transport
= self
.transport
1498 # Override any initial configuration information.
1499 self
.set_properties(props
)
1501 # Start the operation.
1502 self
.history
.log_operation_start("image-create")
1504 # Determine and add the default variants for the image.
1506 self
.cfg
.variants
["variant.opensolaris.zone"] = \
1509 self
.cfg
.variants
["variant.opensolaris.zone"] = \
1512 self
.cfg
.variants
["variant.arch"] = \
1513 variants
.get("variant.arch", platform
.processor())
1515 # After setting up the default variants, add any overrides or
1516 # additional variants or facets specified.
1517 self
.cfg
.variants
.update(variants
)
1518 self
.cfg
.facets
.update(facets
)
1520 # Now everything is ready for publisher configuration.
1521 # Since multiple publishers are allowed, they are all
1522 # added at once without any publisher data retrieval.
1523 # A single retrieval is then performed afterwards, if
1524 # allowed, to minimize the amount of work the client
1527 self
.add_publisher(p
, refresh_allowed
=False,
1528 progtrack
=progtrack
)
1531 self
.refresh_publishers(progtrack
=progtrack
,
1534 # initialize empty catalogs on disk
1535 self
.__rebuild
_image
_catalogs
(progtrack
=progtrack
)
1537 self
.cfg
.set_property("property", "publisher-search-order",
1538 [p
.prefix
for p
in pubs
])
1540 # Ensure publisher search order is written.
1543 self
.history
.log_operation_end()
def __allow_liveroot():
        """Check whether access to the current live root image is
        allowed."""

        env = os.environ
        # A simulated live root or an explicit PKG_LIVE_ROOT always
        # grants access.
        if DebugValues.get_value("simulate_live_root") or \
            "PKG_LIVE_ROOT" in env:
                return True

        # The user may explicitly deny access to the live root.
        denied = (DebugValues.get_value("simulate_no_live_root") or
            "PKG_NO_LIVE_ROOT" in env)

        # Access is allowed by default.
        return not denied
def is_liveroot(self):
        """Report whether this image is rooted at the running system's
        live root."""
        live_root = misc.liveroot()
        return bool(self.root == live_root)
1568 return self
.cfg
.variants
["variant.opensolaris.zone"] == \
1572 return self
.cfg
.variants
["variant.arch"]
def has_boot_archive(self):
        """Returns True if a boot_archive is present in this image."""

        # Reuse the cached answer when one has already been computed.
        if self.__boot_archive is not None:
                return self.__boot_archive

        candidates = (
            "platform/i86pc/amd64/boot_archive",
            "platform/i86pc/boot_archive",
            "platform/sun4u/boot_archive",
            "platform/sun4v/boot_archive",
        )
        # Cache the result so later calls can skip the filesystem
        # checks.
        self.__boot_archive = any(
            os.path.isfile(os.path.join(self.root, p))
            for p in candidates)
        return self.__boot_archive
def get_ramdisk_filelist(self):
        """Return the ramdisk filelist for this image.

        The filelist file itself is included in the result so that the
        boot archive gets rebuilt whenever the list changes.  Entries
        naming directories that really exist under the image root get
        a trailing '/' appended.

        Returns an empty list when the image has no
        boot/solaris/filelist.ramdisk file."""

        p = "boot/solaris/filelist.ramdisk"
        f = os.path.join(self.root, p)

        def addslash(path):
                # Mark entries that are real directories with a
                # trailing slash.
                if os.path.isdir(os.path.join(self.root, path)):
                        return path + "/"
                return path

        if not os.path.isfile(f):
                return []

        # Use open() in a context manager instead of the Python 2-only
        # file() builtin so the descriptor is always closed.
        with open(f) as flist:
                return [addslash(l.strip()) for l in flist] + [p]
1608 def get_cachedirs(self
):
1609 """Returns a list of tuples of the form (dir, readonly, pub,
1610 layout) where 'dir' is the absolute path of the cache directory,
1611 'readonly' is a boolean indicating whether the cache can
1612 be written to, 'pub' is the prefix of the publisher that
1613 the cache directory should be used for, and 'layout' is a
1614 FileManager object used to access file content in the cache.
1615 If 'pub' is None, the cache directory is intended for all
1616 publishers. If 'layout' is None, file content layout can
1621 if self
.version
>= 4:
1622 # Assume cache directories are in V1 Layout if image
1624 file_layout
= fl
.V1Layout()
1626 # Get all readonly cache directories.
1628 (cdir
, True, None, file_layout
)
1629 for cdir
in self
.__read
_cache
_dirs
1632 # Get global write cache directory.
1633 if self
.__write
_cache
_dir
:
1634 cdirs
.append((self
.__write
_cache
_dir
, False, None,
1637 # For images newer than version 3, file data can be stored
1638 # in the publisher's file root.
1639 if self
.version
== self
.CURRENT_VERSION
:
1640 for pub
in self
.gen_publishers(inc_disabled
=True):
1641 froot
= os
.path
.join(pub
.meta_root
, "file")
1643 if self
.__write
_cache
_dir
or \
1644 self
.__write
_cache
_root
:
1646 cdirs
.append((froot
, readonly
, pub
.prefix
,
1649 if self
.__write
_cache
_root
:
1650 # Cache is a tree structure like
1651 # /var/pkg/publisher.
1652 froot
= os
.path
.join(
1653 self
.__write
_cache
_root
, pub
.prefix
,
1655 cdirs
.append((froot
, False, pub
.prefix
,
def get_last_modified(self, string=False):
        """Return the UTC time of the image's last state change, or
        None if it is unknown.  A datetime object is returned by
        default; if 'string' is true and a time is available, the time
        is returned as a formatted string instead."""

        # The known catalog's timestamp is used because it is accurate
        # down to the microsecond, unlike the filesystem, whose
        # resolution is OS-specific.
        last = self.__get_catalog(self.IMG_CATALOG_KNOWN).last_modified
        if last is None or not string:
                return last
        return last.strftime("%Y-%m-%dT%H:%M:%S.%f")
1679 def gen_publishers(self
, inc_disabled
=False):
1681 raise apx
.ImageCfgEmptyError(self
.root
)
1684 if self
.__alt
_pkg
_pub
_map
:
1685 alt_src_pubs
= dict(
1687 for p
in self
.__alt
_pubs
1690 for pfx
in self
.__alt
_known
_cat
.publishers():
1691 # Include alternate package source publishers
1692 # in result, and temporarily enable any
1693 # disabled publishers that already exist in
1694 # the image configuration.
1696 img_pub
= self
.cfg
.publishers
[pfx
]
1698 if not img_pub
.disabled
:
1699 # No override needed.
1701 new_pub
= copy
.copy(img_pub
)
1702 new_pub
.disabled
= False
1704 # Discard origins and mirrors to prevent
1705 # their accidental use.
1706 repo
= new_pub
.repository
1707 repo
.reset_origins()
1708 repo
.reset_mirrors()
1710 new_pub
= alt_src_pubs
[pfx
]
1712 alt_pubs
[pfx
] = new_pub
1715 alt_pubs
.get(p
.prefix
, p
)
1716 for p
in self
.cfg
.publishers
.values()
1719 p
for p
in alt_pubs
.values()
1720 if p
not in publishers
1723 for pub
in publishers
:
1724 # Prepare publishers for transport usage; this must be
1725 # done each time so that information reflects current
1726 # image state. This is done whether or not the
1727 # publisher is returned so that in-memory state is
1729 pub
.meta_root
= self
._get
_publisher
_meta
_root
(
1731 pub
.transport
= self
.transport
1732 if inc_disabled
or not pub
.disabled
:
1735 def get_publisher_ranks(self
):
1736 """Return dictionary of configured + enabled publishers and
1737 unconfigured publishers which still have packages installed.
1739 Each entry contains a tuple of search order index starting at
1740 0, and a boolean indicating whether or not this publisher is
1741 "sticky", and a boolean indicating whether or not the
1742 publisher is enabled"""
1744 pubs
= self
.get_sorted_publishers(inc_disabled
=False)
1746 (pubs
[i
].prefix
, (i
, pubs
[i
].sticky
, True))
1747 for i
in range(0, len(pubs
))
1750 # Add any publishers for pkgs that are installed,
1751 # but have been deleted. These publishers are implicitly
1752 # not-sticky and disabled.
1753 for pub
in self
.get_installed_pubs():
1755 ret
.setdefault(pub
, (i
, False, False))
1758 def get_highest_ranked_publisher(self
):
1759 """Return the highest ranked publisher."""
1761 pubs
= self
.cfg
.get_property("property",
1762 "publisher-search-order")
1764 return self
.get_publisher(prefix
=pubs
[0])
1765 for p
in self
.gen_publishers():
1767 for p
in self
.get_installed_pubs():
1768 return publisher
.Publisher(p
)
1771 def check_cert_validity(self
, pubs
=EmptyI
):
1772 """Validate the certificates of the specified publishers.
1774 Raise an exception if any of the certificates has expired or
1775 is close to expiring."""
1778 pubs
= self
.gen_publishers()
1783 for uri
in r
.origins
:
1786 misc
.validate_ssl_cert(
1790 except apx
.ExpiredCertificate
as e
:
1795 if not os
.path
.exists(
1797 raise apx
.NoSuchKey(
1801 except EnvironmentError as e
:
1802 raise apx
._convert
_error
(e
)
1805 raise apx
.ExpiredCertificates(errors
)
def has_publisher(self, prefix=None, alias=None):
        """Returns a boolean value indicating whether a publisher
        (disabled ones included) exists in the image configuration
        that matches the given prefix or alias."""
        for pub in self.gen_publishers(inc_disabled=True):
                matched = (prefix == pub.prefix or
                    (alias and alias == pub.alias))
                if matched:
                        return True
        return False
1817 def remove_publisher(self
, prefix
=None, alias
=None, progtrack
=None):
1818 """Removes the publisher with the matching identity from the
1822 progtrack
= progress
.NullProgressTracker()
1824 with self
.locked_op("remove-publisher"):
1825 pub
= self
.get_publisher(prefix
=prefix
,
1828 self
.cfg
.remove_publisher(pub
.prefix
)
1829 self
.remove_publisher_metadata(pub
, progtrack
=progtrack
)
def get_publishers(self, inc_disabled=True):
        """Return a dictionary of configured publishers keyed by
        prefix.  This doesn't include unconfigured publishers which
        still have packages installed."""
        pairs = ((p.prefix, p)
            for p in self.gen_publishers(inc_disabled=inc_disabled))
        return dict(pairs)
1842 def get_sorted_publishers(self
, inc_disabled
=True):
1843 """Return a list of configured publishers sorted by rank.
1844 This doesn't include unconfigured publishers which still have
1845 packages installed."""
1847 d
= self
.get_publishers(inc_disabled
=inc_disabled
)
1848 names
= self
.cfg
.get_property("property",
1849 "publisher-search-order")
1852 # If someone has been editing the config file we may have
1853 # unranked publishers. Also, as publisher come and go via the
1854 # sysrepo we can end up with configured but unranked
1855 # publishers. In either case just sort unranked publishers
1858 unranked
= set(d
) - set(names
)
1865 for n
in sorted(unranked
)
def get_publisher(self, prefix=None, alias=None, origin=None):
        """Return the configured publisher (disabled ones included)
        matching the given prefix, alias, or repository origin;
        raises UnknownPublisher when nothing matches."""
        for pub in self.gen_publishers(inc_disabled=True):
                if prefix and prefix == pub.prefix:
                        return pub
                if alias and alias == pub.alias:
                        return pub
                repo = pub.repository
                if origin and repo and repo.has_origin(origin):
                        return pub
        raise apx.UnknownPublisher(max(prefix, alias, origin))
def pub_search_before(self, being_moved, staying_put):
        """Moves publisher "being_moved" to before "staying_put" in
        the image's publisher search order.

        The caller is responsible for locking the image."""
        self.cfg.change_publisher_search_order(being_moved, staying_put,
            after=False)
def pub_search_after(self, being_moved, staying_put):
        """Moves publisher "being_moved" to after "staying_put" in
        the image's publisher search order.

        The caller is responsible for locking the image."""
        self.cfg.change_publisher_search_order(being_moved, staying_put,
            after=True)
1898 def __apply_alt_pkg_sources(self
, img_kcat
):
1899 pkg_pub_map
= self
.__alt
_pkg
_pub
_map
1900 if not pkg_pub_map
or self
.__alt
_pkg
_sources
_loaded
:
1901 # No alternate sources to merge.
1904 # Temporarily merge the package metadata in the alternate
1905 # known package catalog for packages not listed in the
1906 # image's known catalog.
1907 def merge_check(alt_kcat
, pfmri
, new_entry
):
1908 states
= new_entry
["metadata"]["states"]
1909 if pkgdefs
.PKG_STATE_INSTALLED
in states
:
1910 # Not interesting; already installed.
1912 img_entry
= img_kcat
.get_entry(pfmri
=pfmri
)
1913 if not img_entry
is None:
1914 # Already in image known catalog.
1916 return True, new_entry
1918 img_kcat
.append(self
.__alt
_known
_cat
, cb
=merge_check
)
1921 self
.__alt
_pkg
_sources
_loaded
= True
1922 self
.transport
.cfg
.pkg_pub_map
= self
.__alt
_pkg
_pub
_map
1923 self
.transport
.cfg
.alt_pubs
= self
.__alt
_pubs
1924 self
.transport
.cfg
.reset_caches()
1926 def __cleanup_alt_pkg_certs(self
):
1927 """Private helper function to cleanup package certificate
1928 information after use of temporary package data."""
1930 if not self
.__alt
_pubs
:
1933 # Cleanup publisher cert information; any certs not retrieved
1934 # retrieved during temporary publisher use need to be expunged
1935 # from the image configuration.
1936 for pub
in self
.__alt
_pubs
:
1938 ipub
= self
.cfg
.publishers
[pub
.prefix
]
1943 def set_alt_pkg_sources(self
, alt_sources
):
1944 """Specifies an alternate source of package metadata to be
1945 temporarily merged with image state so that it can be used
1946 as part of packaging operations."""
1949 self
.__init
_catalogs
()
1950 self
.__alt
_pkg
_pub
_map
= None
1951 self
.__alt
_pubs
= None
1952 self
.__alt
_known
_cat
= None
1953 self
.__alt
_pkg
_sources
_loaded
= False
1954 self
.transport
.cfg
.pkg_pub_map
= None
1955 self
.transport
.cfg
.alt_pubs
= None
1956 self
.transport
.cfg
.reset_caches()
1958 elif self
.__alt
_pkg
_sources
_loaded
:
1959 # Ensure existing alternate package source data
1960 # is not part of temporary image state.
1961 self
.__init
_catalogs
()
1963 pkg_pub_map
, alt_pubs
, alt_kcat
, ignored
= alt_sources
1964 self
.__alt
_pkg
_pub
_map
= pkg_pub_map
1965 self
.__alt
_pubs
= alt_pubs
1966 self
.__alt
_known
_cat
= alt_kcat
1968 def set_highest_ranked_publisher(self
, prefix
=None, alias
=None,
1970 """Sets the preferred publisher for packaging operations.
1972 'prefix' is an optional string value specifying the name of
1973 a publisher; ignored if 'pub' is provided.
1975 'alias' is an optional string value specifying the alias of
1976 a publisher; ignored if 'pub' is provided.
1978 'pub' is an optional Publisher object identifying the
1979 publisher to set as the preferred publisher.
1981 One of the above parameters must be provided.
1983 The caller is responsible for locking the image."""
1986 pub
= self
.get_publisher(prefix
=prefix
, alias
=alias
)
1987 if not self
.cfg
.allowed_to_move(pub
):
1988 raise apx
.ModifyingSyspubException(_("Publisher '{0}' "
1989 "is a system publisher and cannot be "
1990 "moved.").format(pub
))
1992 pubs
= self
.get_sorted_publishers()
1995 # If we've gotten to the publisher we want to make
1996 # highest ranked, then there's nothing to do because
1997 # it's already as high as it can be.
2000 if self
.cfg
.allowed_to_move(p
):
2003 assert relative
, "Expected {0} to already be part of the " + \
2004 "search order:{1}".format(relative
, ranks
)
2005 self
.cfg
.change_publisher_search_order(pub
.prefix
,
2006 relative
.prefix
, after
=False)
2008 def set_property(self
, prop_name
, prop_value
):
2009 with self
.locked_op("set-property"):
2010 self
.cfg
.set_property("property", prop_name
,
2014 def set_properties(self
, properties
):
2015 properties
= { "property": properties
}
2016 with self
.locked_op("set-property"):
2017 self
.cfg
.set_properties(properties
)
def get_property(self, prop_name):
        """Return the current value of the named image property."""
        value = self.cfg.get_property("property", prop_name)
        return value
def has_property(self, prop_name):
        """Return True if the named image property is defined, False
        otherwise."""
        try:
                self.cfg.get_property("property", prop_name)
        except cfg.ConfigError:
                return False
        return True
2030 def delete_property(self
, prop_name
):
2031 with self
.locked_op("unset-property"):
2032 self
.cfg
.remove_property("property", prop_name
)
2035 def add_property_value(self
, prop_name
, prop_value
):
2036 with self
.locked_op("add-property-value"):
2037 self
.cfg
.add_property_value("property", prop_name
,
2041 def remove_property_value(self
, prop_name
, prop_value
):
2042 with self
.locked_op("remove-property-value"):
2043 self
.cfg
.remove_property_value("property", prop_name
,
2048 """Destroys the image; image object should not be used
2051 if not self
.imgdir
or not os
.path
.exists(self
.imgdir
):
2054 if os
.path
.abspath(self
.imgdir
) == "/":
2059 shutil
.rmtree(self
.imgdir
)
2060 except EnvironmentError as e
:
2061 raise apx
._convert
_error
(e
)
def properties(self):
        """Return the names of all configured image properties.

        Raises ImageCfgEmptyError if the image has no
        configuration."""
        if not self.cfg:
                raise apx.ImageCfgEmptyError(self.root)
        return self.cfg.get_index()["property"].keys()
def add_publisher(self, pub, refresh_allowed=True, progtrack=None,
    approved_cas=EmptyI, revoked_cas=EmptyI, search_after=None,
    search_before=None, search_first=None, unset_cas=EmptyI):
        """Adds the provided publisher object to the image
        configuration.

        'refresh_allowed' is an optional, boolean value indicating
        whether the publisher's metadata should be retrieved when
        adding it to the image's configuration.

        'progtrack' is an optional ProgressTracker object."""

        with self.locked_op("add-publisher"):
                # Forward the caller's CA certificate arguments.
                # Previously the literal EmptyI was passed for
                # approved_cas, revoked_cas, and unset_cas, which
                # silently discarded any values the caller supplied.
                return self.__add_publisher(pub,
                    refresh_allowed=refresh_allowed,
                    progtrack=progtrack, approved_cas=approved_cas,
                    revoked_cas=revoked_cas, search_after=search_after,
                    search_before=search_before,
                    search_first=search_first, unset_cas=unset_cas)
2088 def __update_publisher_catalogs(self
, pub
, progtrack
=None,
2089 refresh_allowed
=True):
2090 # Ensure that if the publisher's meta directory already
2091 # exists for some reason that the data within is not
2093 self
.remove_publisher_metadata(pub
, progtrack
=progtrack
,
2096 repo
= pub
.repository
2097 if refresh_allowed
and repo
.origins
:
2099 # First, verify that the publisher has a
2100 # valid pkg(5) repository.
2101 self
.transport
.valid_publisher_test(pub
)
2102 pub
.validate_config()
2103 self
.refresh_publishers(pubs
=[pub
],
2104 progtrack
=progtrack
)
2105 except Exception as e
:
2106 # Remove the newly added publisher since
2107 # it is invalid or the retrieval failed.
2109 self
.cfg
.remove_publisher(pub
.prefix
)
2112 # Remove the newly added publisher since
2113 # the retrieval failed.
2115 self
.cfg
.remove_publisher(pub
.prefix
)
2118 def __add_publisher(self
, pub
, refresh_allowed
=True, progtrack
=None,
2119 approved_cas
=EmptyI
, revoked_cas
=EmptyI
, search_after
=None,
2120 search_before
=None, search_first
=None, unset_cas
=EmptyI
):
2121 """Private version of add_publisher(); caller is responsible
2124 assert (not search_after
and not search_before
) or \
2125 (not search_after
and not search_first
) or \
2126 (not search_before
and not search_first
)
2128 if self
.version
< self
.CURRENT_VERSION
:
2129 raise apx
.ImageFormatUpdateNeeded(self
.root
)
2131 for p
in self
.cfg
.publishers
.values():
2132 if pub
.prefix
== p
.prefix
or \
2133 pub
.prefix
== p
.alias
or \
2134 pub
.alias
and (pub
.alias
== p
.alias
or
2135 pub
.alias
== p
.prefix
):
2136 raise apx
.DuplicatePublisher(pub
)
2139 progtrack
= progress
.NullProgressTracker()
2141 # Must assign this first before performing operations.
2142 pub
.meta_root
= self
._get
_publisher
_meta
_root
(
2144 pub
.transport
= self
.transport
2146 # Before continuing, validate SSL information.
2148 self
.check_cert_validity(pubs
=[pub
])
2149 except apx
.ExpiringCertificate
as e
:
2150 logger
.error(str(e
))
2152 self
.cfg
.publishers
[pub
.prefix
] = pub
2154 self
.__update
_publisher
_catalogs
(pub
, progtrack
=progtrack
,
2155 refresh_allowed
=refresh_allowed
)
2157 for ca
in approved_cas
:
2159 ca
= os
.path
.abspath(ca
)
2163 except EnvironmentError as e
:
2164 if e
.errno
== errno
.ENOENT
:
2165 raise apx
.MissingFileArgumentException(
2167 raise apx
._convert
_error
(e
)
2168 pub
.approve_ca_cert(s
, manual
=True)
2170 for hsh
in revoked_cas
:
2171 pub
.revoke_ca_cert(hsh
)
2173 for hsh
in unset_cas
:
2174 pub
.unset_ca_cert(hsh
)
2177 self
.set_highest_ranked_publisher(prefix
=pub
.prefix
)
2179 self
.pub_search_before(pub
.prefix
, search_before
)
2181 self
.pub_search_after(pub
.prefix
, search_after
)
2183 # Only after success should the configuration be saved.
2186 def verify(self
, fmri
, progresstracker
, **kwargs
):
2187 """Generator that returns a tuple of the form (action, errors,
2188 warnings, info) if there are any error, warning, or other
2189 messages about an action contained within the specified
2190 package. Where the returned messages are lists of strings
2191 indicating fatal problems, potential issues (that can be
2192 ignored), or extra information to be displayed respectively.
2194 'fmri' is the fmri of the package to verify.
2196 'progresstracker' is a ProgressTracker object.
2198 'kwargs' is a dict of additional keyword arguments to be passed
2199 to each action verification routine."""
2202 pub
= self
.get_publisher(prefix
=fmri
.publisher
)
2203 except apx
.UnknownPublisher
:
2204 # Since user removed publisher, assume this is the same
2205 # as if they had set signature-policy ignore for the
2209 sig_pol
= self
.signature_policy
.combine(
2210 pub
.signature_policy
)
2212 progresstracker
.plan_add_progress(
2213 progresstracker
.PLAN_PKG_VERIFY
)
2214 manf
= self
.get_manifest(fmri
, ignore_excludes
=True)
2215 sigs
= list(manf
.gen_actions_by_type("signature",
2216 excludes
=self
.list_excludes()))
2217 if sig_pol
and (sigs
or sig_pol
.name
!= "ignore"):
2218 # Only perform signature verification logic if there are
2219 # signatures or if signature-policy is not 'ignore'.
2221 # Signature verification must be done using all
2222 # the actions from the manifest, not just the
2223 # ones for this image's variants.
2224 sig_pol
.process_signatures(sigs
,
2225 manf
.gen_actions(), pub
, self
.trust_anchors
,
2226 self
.cfg
.get_policy(
2227 "check-certificate-revocation"))
2228 except apx
.SigningException
as e
:
2230 yield e
.sig
, [e
], [], []
2231 except apx
.InvalidResourceLocation
as e
:
2232 yield None, [e
], [], []
2234 progresstracker
.plan_add_progress(
2235 progresstracker
.PLAN_PKG_VERIFY
, nitems
=0)
2236 def mediation_allowed(act
):
2237 """Helper function to determine if the mediation
2238 delivered by a link is allowed. If it is, then
2239 the link should be verified. (Yes, this does mean
2240 that the non-existence of links is not verified.)
2243 mediator
= act
.attrs
.get("mediator")
2244 if not mediator
or mediator
not in self
.cfg
.mediators
:
2245 # Link isn't mediated or mediation is unknown.
2248 cfg_med_version
= self
.cfg
.mediators
[mediator
].get(
2250 cfg_med_impl
= self
.cfg
.mediators
[mediator
].get(
2253 med_version
= act
.attrs
.get("mediator-version")
2255 med_version
= pkg
.version
.Version(
2257 med_impl
= act
.attrs
.get("mediator-implementation")
2259 return med_version
== cfg_med_version
and \
2260 med
.mediator_impl_matches(med_impl
, cfg_med_impl
)
2262 # pkg verify only looks at actions that have not been dehydrated.
2263 excludes
= self
.list_excludes()
2264 vardrate_excludes
= [self
.cfg
.variants
.allow_action
]
2265 dehydrate
= self
.cfg
.get_property("property", "dehydrated")
2267 func
= self
.get_dehydrated_exclude_func(dehydrate
)
2268 excludes
.append(func
)
2269 vardrate_excludes
.append(func
)
2271 for act
in manf
.gen_actions():
2272 progresstracker
.plan_add_progress(
2273 progresstracker
.PLAN_PKG_VERIFY
, nitems
=0)
2274 if (act
.name
== "link" or
2275 act
.name
== "hardlink") and \
2276 not mediation_allowed(act
):
2277 # Link doesn't match configured
2278 # mediation, so shouldn't be verified.
2284 if act
.include_this(excludes
, publisher
=fmri
.publisher
):
2285 errors
, warnings
, info
= act
.verify(
2286 self
, pfmri
=fmri
, **kwargs
)
2287 elif act
.include_this(vardrate_excludes
,
2288 publisher
=fmri
.publisher
) and not act
.refcountable
:
2289 # Verify that file that is faceted out does not
2290 # exist. Exclude actions which may be delivered
2291 # from multiple packages.
2292 path
= act
.attrs
.get("path", None)
2293 if path
is not None and os
.path
.exists(
2294 os
.path
.join(self
.root
, path
)):
2296 _("File should not exist"))
2298 # Action that is not applicable to image variant
2299 # or has been dehydrated.
2302 if errors
or warnings
or info
:
2303 yield act
, errors
, warnings
, info
2305 def image_config_update(self
, new_variants
, new_facets
, new_mediators
):
2306 """update variants in image config"""
2308 if new_variants
is not None:
2309 self
.cfg
.variants
.update(new_variants
)
2310 if new_facets
is not None:
2311 self
.cfg
.facets
= new_facets
2312 if new_mediators
is not None:
2313 self
.cfg
.mediators
= new_mediators
2316 def __verify_manifest(self
, fmri
, mfstpath
, alt_pub
=None):
2317 """Verify a manifest. The caller must supply the FMRI
2318 for the package in 'fmri', as well as the path to the
2319 manifest file that will be verified."""
2322 return self
.transport
._verify
_manifest
(fmri
,
2323 mfstpath
=mfstpath
, pub
=alt_pub
)
2324 except InvalidContentException
:
2327 def has_manifest(self
, pfmri
, alt_pub
=None):
2328 """Check to see if the manifest for pfmri is present on disk and
2329 has the correct hash."""
2331 pth
= self
.get_manifest_path(pfmri
)
2332 on_disk
= os
.path
.exists(pth
)
2335 self
.is_pkg_installed(pfmri
) or \
2336 self
.__verify
_manifest
(fmri
=pfmri
, mfstpath
=pth
, alt_pub
=alt_pub
):
2340 def get_license_dir(self
, pfmri
):
2341 """Return path to package license directory."""
2342 if self
.version
== self
.CURRENT_VERSION
:
2343 # Newer image format stores license files per-stem,
2344 # instead of per-stem and version, so that transitions
2345 # between package versions don't require redelivery
2347 return os
.path
.join(self
.imgdir
, "license",
2348 pfmri
.get_dir_path(stemonly
=True))
2349 # Older image formats store license files in the manifest cache
2351 return self
.get_manifest_dir(pfmri
)
2353 def __get_installed_pkg_publisher(self
, pfmri
):
2354 """Returns the publisher for the FMRI of an installed package
2355 or None if the package is not installed.
2357 for f
in self
.gen_installed_pkgs():
2358 if f
.pkg_name
== pfmri
.pkg_name
:
2362 def get_manifest_dir(self
, pfmri
):
2363 """Return path to on-disk manifest cache directory."""
2364 if not pfmri
.publisher
:
2365 # Needed for consumers such as search that don't provide
2366 # publisher information.
2367 pfmri
= pfmri
.copy()
2368 pfmri
.publisher
= self
.__get
_installed
_pkg
_publisher
(
2370 assert pfmri
.publisher
2371 if self
.version
== self
.CURRENT_VERSION
:
2372 root
= self
._get
_publisher
_cache
_root
(pfmri
.publisher
)
2375 return os
.path
.join(root
, "pkg", pfmri
.get_dir_path())
2377 def get_manifest_path(self
, pfmri
):
2378 """Return path to on-disk manifest file."""
2379 if not pfmri
.publisher
:
2380 # Needed for consumers such as search that don't provide
2381 # publisher information.
2382 pfmri
= pfmri
.copy()
2383 pfmri
.publisher
= self
.__get
_installed
_pkg
_publisher
(
2385 assert pfmri
.publisher
2386 if self
.version
== self
.CURRENT_VERSION
:
2387 root
= os
.path
.join(self
._get
_publisher
_meta
_root
(
2389 return os
.path
.join(root
, "pkg", pfmri
.get_dir_path())
2390 return os
.path
.join(self
.get_manifest_dir(pfmri
),
2393 def __get_manifest(self
, fmri
, excludes
=EmptyI
, intent
=None,
2395 """Find on-disk manifest and create in-memory Manifest
2396 object.... grab from server if needed"""
2399 if not self
.has_manifest(fmri
, alt_pub
=alt_pub
):
2401 ret
= manifest
.FactoredManifest(fmri
,
2402 self
.get_manifest_dir(fmri
),
2404 pathname
=self
.get_manifest_path(fmri
))
2406 # if we have a intent string, let depot
2407 # know for what we're using the cached manifest
2411 alt_repo
= alt_pub
.repository
2413 self
.transport
.touch_manifest(fmri
,
2414 intent
, alt_repo
=alt_repo
)
2415 except (apx
.UnknownPublisher
,
2416 apx
.TransportError
):
2417 # It's not fatal if we can't find
2418 # or reach the publisher.
2421 ret
= self
.transport
.get_manifest(fmri
, excludes
,
2422 intent
, pub
=alt_pub
)
2425 def get_manifest(self
, fmri
, ignore_excludes
=False, intent
=None,
2427 """return manifest; uses cached version if available.
2428 ignore_excludes controls whether manifest contains actions
2431 If 'ignore_excludes' is set to True, then all actions in the
2432 manifest are included, regardless of variant or facet tags. If
2433 set to False, then the variants and facets currently set in the
2434 image will be applied, potentially filtering out some of the
2437 # Normally elide other arch variants, facets
2442 excludes
= [self
.cfg
.variants
.allow_action
,
2443 self
.cfg
.facets
.allow_action
]
2446 m
= self
.__get
_manifest
(fmri
, excludes
=excludes
,
2447 intent
=intent
, alt_pub
=alt_pub
)
2448 except apx
.ActionExecutionError
as e
:
2450 except pkg
.actions
.ActionError
as e
:
2451 raise apx
.InvalidPackageErrors([e
])
2455 def update_pkg_installed_state(self
, pkg_pairs
, progtrack
):
2456 """Sets the recorded installed state of each package pair in
2457 'pkg_pairs'. 'pkg_pair' should be an iterable of tuples of
2458 the format (added, removed) where 'removed' is the FMRI of the
2459 package that was uninstalled, and 'added' is the package
2460 installed for the operation. These pairs are representative of
2461 the destination and origin package for each part of the
2464 if self
.version
< self
.CURRENT_VERSION
:
2465 raise apx
.ImageFormatUpdateNeeded(self
.root
)
2467 kcat
= self
.get_catalog(self
.IMG_CATALOG_KNOWN
)
2468 icat
= self
.get_catalog(self
.IMG_CATALOG_INSTALLED
)
2473 for add_pkg
, rem_pkg
in pkg_pairs
:
2474 if add_pkg
== rem_pkg
:
2479 removed
.add(rem_pkg
)
2480 if add_pkg
and rem_pkg
:
2481 updated
[add_pkg
] = \
2482 dict(kcat
.get_entry(rem_pkg
).get(
2485 combo
= added
.union(removed
)
2487 progtrack
.job_start(progtrack
.JOB_STATE_DB
)
2488 # 'Updating package state database'
2490 progtrack
.job_add_progress(progtrack
.JOB_STATE_DB
)
2491 entry
= kcat
.get_entry(pfmri
)
2492 mdata
= entry
.get("metadata", {})
2493 states
= set(mdata
.get("states", set()))
2494 if pfmri
in removed
:
2495 icat
.remove_package(pfmri
)
2496 states
.discard(pkgdefs
.PKG_STATE_INSTALLED
)
2497 mdata
.pop("last-install", None)
2498 mdata
.pop("last-update", None)
2501 states
.add(pkgdefs
.PKG_STATE_INSTALLED
)
2502 cur_time
= pkg
.catalog
.now_to_basic_ts()
2503 if pfmri
in updated
:
2504 last_install
= updated
[pfmri
].get(
2507 mdata
["last-install"] = \
2509 mdata
["last-update"] = \
2512 mdata
["last-install"] = \
2515 mdata
["last-install"] = cur_time
2516 if pkgdefs
.PKG_STATE_ALT_SOURCE
in states
:
2518 pkgdefs
.PKG_STATE_UPGRADABLE
)
2520 pkgdefs
.PKG_STATE_ALT_SOURCE
)
2522 pkgdefs
.PKG_STATE_KNOWN
)
2523 elif pkgdefs
.PKG_STATE_KNOWN
not in states
:
2524 # This entry is no longer available and has no
2525 # meaningful state information, so should be
2527 kcat
.remove_package(pfmri
)
2528 progtrack
.job_add_progress(
2529 progtrack
.JOB_STATE_DB
)
2532 if (pkgdefs
.PKG_STATE_INSTALLED
in states
and
2533 pkgdefs
.PKG_STATE_UNINSTALLED
in states
) or (
2534 pkgdefs
.PKG_STATE_KNOWN
in states
and
2535 pkgdefs
.PKG_STATE_UNKNOWN
in states
):
2536 raise apx
.ImagePkgStateError(pfmri
,
2539 # Catalog format only supports lists.
2540 mdata
["states"] = list(states
)
2542 # Now record the package state.
2543 kcat
.update_entry(mdata
, pfmri
=pfmri
)
2545 # If the package is being marked as installed,
2546 # then it shouldn't already exist in the
2547 # installed catalog and should be added.
2549 icat
.append(kcat
, pfmri
=pfmri
)
2551 entry
= mdata
= states
= None
2552 progtrack
.job_add_progress(progtrack
.JOB_STATE_DB
)
2553 progtrack
.job_done(progtrack
.JOB_STATE_DB
)
2555 # Discard entries for alternate source packages that weren't
2556 # installed as part of the operation.
2557 if self
.__alt
_pkg
_pub
_map
:
2558 for pfmri
in self
.__alt
_known
_cat
.fmris():
2563 entry
= kcat
.get_entry(pfmri
)
2565 # The only reason that the entry should
2566 # not exist in the 'known' part is
2567 # because it was removed during the
2569 assert pfmri
in removed
2572 states
= entry
.get("metadata", {}).get("states",
2574 if pkgdefs
.PKG_STATE_ALT_SOURCE
in states
:
2575 kcat
.remove_package(pfmri
)
2577 # Now add the publishers of packages that were installed
2578 # from temporary sources that did not previously exist
2579 # to the image's configuration. (But without any
2580 # origins, sticky, and enabled.)
2581 cfgpubs
= set(self
.cfg
.publishers
.keys())
2582 instpubs
= set(f
.publisher
for f
in added
)
2583 altpubs
= self
.__alt
_known
_cat
.publishers()
2585 # List of publishers that need to be added is the
2586 # intersection of installed and alternate minus
2587 # the already configured.
2588 newpubs
= (instpubs
& altpubs
) - cfgpubs
2590 npub
= publisher
.Publisher(pfx
,
2591 repository
=publisher
.Repository())
2592 self
.__add
_publisher
(npub
,
2593 refresh_allowed
=False)
2595 # Ensure image configuration reflects new information.
2596 self
.__cleanup
_alt
_pkg
_certs
()
2599 # Remove manifests of packages that were removed from the
2600 # system. Some packages may have only had facets or
2601 # variants changed, so don't remove those.
2603 # 'Updating package cache'
2604 progtrack
.job_start(progtrack
.JOB_PKG_CACHE
, goal
=len(removed
))
2605 for pfmri
in removed
:
2606 mcdir
= self
.get_manifest_dir(pfmri
)
2607 manifest
.FactoredManifest
.clear_cache(mcdir
)
2609 # Remove package cache directory if possible; we don't
2612 os
.rmdir(os
.path
.dirname(mcdir
))
2616 mpath
= self
.get_manifest_path(pfmri
)
2618 portable
.remove(mpath
)
2619 except EnvironmentError as e
:
2620 if e
.errno
!= errno
.ENOENT
:
2621 raise apx
._convert
_error
(e
)
2623 # Remove package manifest directory if possible; we
2624 # don't care if it fails.
2626 os
.rmdir(os
.path
.dirname(mpath
))
2629 progtrack
.job_add_progress(progtrack
.JOB_PKG_CACHE
)
2630 progtrack
.job_done(progtrack
.JOB_PKG_CACHE
)
2632 progtrack
.job_start(progtrack
.JOB_IMAGE_STATE
)
2634 # Temporarily redirect the catalogs to a different location,
2635 # so that if the save is interrupted, the image won't be left
2636 # with invalid state, and then save them.
2637 tmp_state_root
= self
.temporary_dir()
2640 for cat
, name
in ((kcat
, self
.IMG_CATALOG_KNOWN
),
2641 (icat
, self
.IMG_CATALOG_INSTALLED
)):
2642 cpath
= os
.path
.join(tmp_state_root
, name
)
2644 # Must copy the old catalog data to the new
2645 # destination as only changed files will be
2647 progtrack
.job_add_progress(
2648 progtrack
.JOB_IMAGE_STATE
)
2649 misc
.copytree(cat
.meta_root
, cpath
)
2650 progtrack
.job_add_progress(
2651 progtrack
.JOB_IMAGE_STATE
)
2652 cat
.meta_root
= cpath
2653 cat
.finalize(pfmris
=added
)
2654 progtrack
.job_add_progress(
2655 progtrack
.JOB_IMAGE_STATE
)
2657 progtrack
.job_add_progress(
2658 progtrack
.JOB_IMAGE_STATE
)
2661 self
.__init
_catalogs
()
2662 progtrack
.job_add_progress(progtrack
.JOB_IMAGE_STATE
)
2664 # copy any other state files from current state
2665 # dir into new state dir.
2666 for p
in os
.listdir(self
._statedir
):
2667 progtrack
.job_add_progress(
2668 progtrack
.JOB_IMAGE_STATE
)
2669 fp
= os
.path
.join(self
._statedir
, p
)
2670 if os
.path
.isfile(fp
):
2671 portable
.copyfile(fp
,
2672 os
.path
.join(tmp_state_root
, p
))
2674 # Next, preserve the old installed state dir, rename the
2675 # new one into place, and then remove the old one.
2676 orig_state_root
= self
.salvage(self
._statedir
,
2678 portable
.rename(tmp_state_root
, self
._statedir
)
2680 progtrack
.job_add_progress(progtrack
.JOB_IMAGE_STATE
)
2681 shutil
.rmtree(orig_state_root
, True)
2683 progtrack
.job_add_progress(progtrack
.JOB_IMAGE_STATE
)
2684 except EnvironmentError as e
:
2685 # shutil.Error can contains a tuple of lists of errors.
2686 # Some of the error entries may be a tuple others will
2687 # be a string due to poor error handling in shutil.
2688 if isinstance(e
, shutil
.Error
) and \
2689 type(e
.args
[0]) == list:
2691 for elist
in e
.args
:
2693 if type(entry
) == tuple:
2694 msg
+= "{0}\n".format(
2697 msg
+= "{0}\n".format(
2699 raise apx
.UnknownErrors(msg
)
2700 raise apx
._convert
_error
(e
)
2702 # Regardless of success, the following must happen.
2703 self
.__init
_catalogs
()
2704 if os
.path
.exists(tmp_state_root
):
2705 shutil
.rmtree(tmp_state_root
, True)
2707 progtrack
.job_done(progtrack
.JOB_IMAGE_STATE
)
2709 def get_catalog(self
, name
):
2710 """Returns the requested image catalog.
2712 'name' must be one of the following image constants:
2714 The known catalog contains all of packages that are
2715 installed or available from a publisher's repository.
2717 IMG_CATALOG_INSTALLED
2718 The installed catalog is a subset of the 'known'
2719 catalog that only contains installed packages."""
2722 raise RuntimeError("self.imgdir must be set")
2724 cat
= self
.__catalogs
.get(name
)
2726 cat
= self
.__get
_catalog
(name
)
2727 self
.__catalogs
[name
] = cat
2729 if name
== self
.IMG_CATALOG_KNOWN
:
2730 # Apply alternate package source data every time that
2731 # the known catalog is requested.
2732 self
.__apply
_alt
_pkg
_sources
(cat
)
2736 def _manifest_cb(self
, cat
, f
):
2737 # Only allow lazy-load for packages from non-v1 sources.
2738 # Assume entries for other sources have all data
2739 # required in catalog. This prevents manifest retrieval
2740 # for packages that don't have any related action data
2741 # in the catalog because they don't have any related
2742 # action data in their manifest.
2743 entry
= cat
.get_entry(f
)
2744 states
= entry
["metadata"]["states"]
2745 if pkgdefs
.PKG_STATE_V1
not in states
:
2746 return self
.get_manifest(f
, ignore_excludes
=True)
2749 def __get_catalog(self
, name
):
2750 """Private method to retrieve catalog; this bypasses the
2751 normal automatic caching (unless the image hasn't been
2754 if self
.__upgraded
and self
.version
< 3:
2755 # Assume the catalog is already cached in this case
2756 # and can't be reloaded from disk as it doesn't exist
2758 return self
.__catalogs
[name
]
2760 croot
= os
.path
.join(self
._statedir
, name
)
2763 except EnvironmentError as e
:
2764 if e
.errno
in (errno
.EACCES
, errno
.EROFS
):
2765 # Allow operations to work for
2766 # unprivileged users.
2768 elif e
.errno
!= errno
.EEXIST
:
2771 # batch_mode is set to True here as any operations that modify
2772 # the catalogs (add or remove entries) are only done during an
2773 # image upgrade or metadata refresh. In both cases, the catalog
2774 # is resorted and finalized so this is always safe to use.
2775 cat
= pkg
.catalog
.Catalog(batch_mode
=True,
2776 manifest_cb
=self
._manifest
_cb
, meta_root
=croot
, sign
=False)
2779 def __remove_catalogs(self
):
2780 """Removes all image catalogs and their directories."""
2782 self
.__init
_catalogs
()
2783 for name
in (self
.IMG_CATALOG_KNOWN
,
2784 self
.IMG_CATALOG_INSTALLED
):
2785 shutil
.rmtree(os
.path
.join(self
._statedir
, name
))
2787 def get_version_installed(self
, pfmri
):
2788 """Returns an fmri of the installed package matching the
2789 package stem of the given fmri or None if no match is found."""
2791 cat
= self
.get_catalog(self
.IMG_CATALOG_INSTALLED
)
2792 for ver
, fmris
in cat
.fmris_by_version(pfmri
.pkg_name
):
2796 def get_pkg_repo(self
, pfmri
):
2797 """Returns the repository object containing the origins that
2798 should be used to retrieve the specified package or None if
2799 it can be retrieved from all sources or is not a known package.
2802 assert pfmri
.publisher
2803 cat
= self
.get_catalog(self
.IMG_CATALOG_KNOWN
)
2804 entry
= cat
.get_entry(pfmri
)
2806 # Package not known.
2810 slist
= entry
["metadata"]["sources"]
2812 # Can be retrieved from any source.
2816 # Can be retrieved from any source.
2819 pub
= self
.get_publisher(prefix
=pfmri
.publisher
)
2820 repo
= copy
.copy(pub
.repository
)
2822 o
for o
in repo
.origins
2827 # Known sources don't match configured; return so that
2828 # caller can fallback to default behaviour.
2831 repo
.origins
= norigins
2834 def get_pkg_state(self
, pfmri
):
2835 """Returns the list of states a package is in for this image."""
2837 cat
= self
.get_catalog(self
.IMG_CATALOG_KNOWN
)
2838 entry
= cat
.get_entry(pfmri
)
2841 return entry
["metadata"]["states"]
2843 def is_pkg_installed(self
, pfmri
):
2844 """Returns a boolean value indicating whether the specified
2845 package is installed."""
2847 # Avoid loading the installed catalog if the known catalog
2848 # is already loaded. This is safe since the installed
2849 # catalog is a subset of the known, and a specific entry
2850 # is being retrieved.
2851 if not self
.__catalog
_loaded
(self
.IMG_CATALOG_KNOWN
):
2852 cat
= self
.get_catalog(self
.IMG_CATALOG_INSTALLED
)
2854 cat
= self
.get_catalog(self
.IMG_CATALOG_KNOWN
)
2856 entry
= cat
.get_entry(pfmri
)
2859 states
= entry
["metadata"]["states"]
2860 return pkgdefs
.PKG_STATE_INSTALLED
in states
2862 def list_excludes(self
, new_variants
=None, new_facets
=None):
2863 """Generate a list of callables that each return True if an
2864 action is to be included in the image using the currently
2865 defined variants & facets for the image, or an updated set if
2866 new_variants or new_facets are specified."""
2869 new_vars
= self
.cfg
.variants
.copy()
2870 new_vars
.update(new_variants
)
2871 var_call
= new_vars
.allow_action
2873 var_call
= self
.cfg
.variants
.allow_action
2874 if new_facets
is not None:
2875 fac_call
= new_facets
.allow_action
2877 fac_call
= self
.cfg
.facets
.allow_action
2879 return [var_call
, fac_call
]
2881 def get_variants(self
):
2882 """ return a copy of the current image variants"""
2883 return self
.cfg
.variants
.copy()
2885 def get_facets(self
):
2886 """ Return a copy of the current image facets"""
2887 return self
.cfg
.facets
.copy()
2889 def __state_updating_pathname(self
):
2890 """Return the path to a flag file indicating that the image
2891 catalog is being updated."""
2892 return os
.path
.join(self
._statedir
, self
.__STATE
_UPDATING
_FILE
)
2894 def __start_state_update(self
):
2895 """Called when we start updating the image catalog. Normally
2896 returns False, but will return True if a previous update was
2899 # get the path to the image catalog update flag file
2900 pathname
= self
.__state
_updating
_pathname
()
2902 # if the flag file exists a previous update was interrupted so
2904 if os
.path
.exists(pathname
):
2907 # create the flag file and return 0
2908 file_mode
= misc
.PKG_FILE_MODE
2911 os
.chmod(pathname
, file_mode
)
2912 except EnvironmentError as e
:
2913 if e
.errno
== errno
.EACCES
:
2914 raise apx
.PermissionsException(e
.filename
)
2915 if e
.errno
== errno
.EROFS
:
2916 raise apx
.ReadOnlyFileSystemException(
2921 def __end_state_update(self
):
2922 """Called when we're done updating the image catalog."""
2924 # get the path to the image catalog update flag file
2925 pathname
= self
.__state
_updating
_pathname
()
2927 # delete the flag file.
2929 portable
.remove(pathname
)
2930 except EnvironmentError as e
:
2931 if e
.errno
== errno
.EACCES
:
2932 raise apx
.PermissionsException(e
.filename
)
2933 if e
.errno
== errno
.EROFS
:
2934 raise apx
.ReadOnlyFileSystemException(
2938 def __rebuild_image_catalogs(self
, progtrack
=None):
2939 """Rebuilds the image catalogs based on the available publisher
2942 if self
.version
< 3:
2943 raise apx
.ImageFormatUpdateNeeded(self
.root
)
2946 progtrack
= progress
.NullProgressTracker()
2948 progtrack
.cache_catalogs_start()
2950 publist
= list(self
.gen_publishers())
2952 be_name
, be_uuid
= bootenv
.BootEnv
.get_be_name(self
.root
)
2953 self
.history
.log_operation_start("rebuild-image-catalogs",
2954 be_name
=be_name
, be_uuid
=be_uuid
)
2956 # Mark all operations as occurring at this time.
2957 op_time
= datetime
.datetime
.utcnow()
2959 # The image catalogs need to be updated, but this is a bit
2960 # tricky as previously known packages must remain known even
2961 # if PKG_STATE_KNOWN is no longer true if any other state
2962 # information is present. This is to allow freezing, etc. of
2963 # package states on a permanent basis even if the package is
2964 # no longer available from a publisher repository. However,
2965 # this is only True of installed packages.
2966 old_icat
= self
.get_catalog(self
.IMG_CATALOG_INSTALLED
)
2968 # batch_mode is set to True here since without it, catalog
2969 # population time is almost doubled (since the catalog is
2970 # re-sorted and stats are generated for every operation).
2971 # In addition, the new catalog is first created in a new
2972 # temporary directory so that it can be moved into place
2973 # at the very end of this process (to minimize the chance
2974 # that failure or interruption will cause the image to be
2975 # left in an inconsistent state).
2976 tmp_state_root
= self
.temporary_dir()
2978 # Copy any regular files placed in the state directory
2979 for p
in os
.listdir(self
._statedir
):
2980 if p
== self
.__STATE
_UPDATING
_FILE
:
2981 # don't copy the state updating file
2983 fp
= os
.path
.join(self
._statedir
, p
)
2984 if os
.path
.isfile(fp
):
2985 portable
.copyfile(fp
, os
.path
.join(tmp_state_root
, p
))
2987 kcat
= pkg
.catalog
.Catalog(batch_mode
=True,
2988 meta_root
=os
.path
.join(tmp_state_root
,
2989 self
.IMG_CATALOG_KNOWN
), sign
=False)
2991 # XXX if any of the below fails for any reason, the old 'known'
2992 # catalog needs to be re-loaded so the client is in a consistent
2995 # All enabled publisher catalogs must be processed.
2996 pub_cats
= [(pub
.prefix
, pub
.catalog
) for pub
in publist
]
2998 # XXX For backwards compatibility, 'upgradability' of packages
2999 # is calculated and stored based on whether a given pkg stem
3000 # matches the newest version in the catalog. This is quite
3001 # expensive (due to overhead), but at least the cost is
3002 # consolidated here. This comparison is also cross-publisher,
3003 # as it used to be. In the future, it could likely be improved
3004 # by usage of the SAT solver.
3006 for pfx
, cat
in [(None, old_icat
)] + pub_cats
:
3007 for f
in cat
.fmris(last
=True,
3008 pubs
=pfx
and [pfx
] or EmptyI
):
3009 nver
, snver
= newest
.get(f
.pkg_name
, (None,
3011 if f
.version
> nver
:
3012 newest
[f
.pkg_name
] = (f
.version
,
3015 # Next, copy all of the entries for the catalog parts that
3016 # currently exist into the image 'known' catalog.
3018 # Iterator for source parts.
3020 (pfx
, cat
, name
, cat
.get_part(name
, must_exist
=True))
3021 for pfx
, cat
in pub_cats
3022 for name
in cat
.parts
3025 # Build list of installed packages based on actual state
3026 # information just in case there is a state issue from an
3029 for t
, entry
in old_icat
.tuple_entries():
3030 states
= entry
["metadata"]["states"]
3031 if pkgdefs
.PKG_STATE_INSTALLED
not in states
:
3034 inst_stems
.setdefault(pub
, {})
3035 inst_stems
[pub
].setdefault(stem
, {})
3036 inst_stems
[pub
][stem
][ver
] = False
3038 # Create the new installed catalog in a temporary location.
3039 icat
= pkg
.catalog
.Catalog(batch_mode
=True,
3040 meta_root
=os
.path
.join(tmp_state_root
,
3041 self
.IMG_CATALOG_INSTALLED
), sign
=False)
3043 excludes
= self
.list_excludes()
3045 frozen_pkgs
= dict([
3046 (p
[0].pkg_name
, p
[0]) for p
in self
.get_frozen_list()
3048 for pfx
, cat
, name
, spart
in sparts
:
3049 # 'spart' is the source part.
3051 # Client hasn't retrieved this part.
3055 nkpart
= kcat
.get_part(name
)
3056 nipart
= icat
.get_part(name
)
3057 base
= name
.startswith("catalog.base.")
3059 # Avoid accessor overhead since these will be
3060 # used for every entry.
3061 cat_ver
= cat
.version
3062 dp
= cat
.get_part("catalog.dependency.C",
3065 for t
, sentry
in spart
.tuple_entries(pubs
=[pfx
]):
3069 if pub
in inst_stems
and \
3070 stem
in inst_stems
[pub
] and \
3071 ver
in inst_stems
[pub
][stem
]:
3073 inst_stems
[pub
][stem
][ver
] = True
3075 # copy() is too slow here and catalog entries
3076 # are shallow so this should be sufficient.
3077 entry
= dict(sentry
.iteritems())
3079 # Nothing else to do except add the
3080 # entry for non-base catalog parts.
3081 nkpart
.add(metadata
=entry
,
3082 op_time
=op_time
, pub
=pub
, stem
=stem
,
3085 nipart
.add(metadata
=entry
,
3086 op_time
=op_time
, pub
=pub
,
3090 # Only the base catalog part stores package
3091 # state information and/or other metadata.
3092 mdata
= entry
.setdefault("metadata", {})
3093 states
= mdata
.setdefault("states", [])
3094 states
.append(pkgdefs
.PKG_STATE_KNOWN
)
3097 states
.append(pkgdefs
.PKG_STATE_V0
)
3098 elif pkgdefs
.PKG_STATE_V0
not in states
:
3099 # Assume V1 catalog source.
3100 states
.append(pkgdefs
.PKG_STATE_V1
)
3104 pkgdefs
.PKG_STATE_INSTALLED
)
3106 nver
, snver
= newest
.get(stem
, (None, None))
3107 if snver
is not None and ver
!= snver
:
3109 pkgdefs
.PKG_STATE_UPGRADABLE
)
3111 # Check if the package is frozen.
3112 if stem
in frozen_pkgs
:
3113 f_ver
= frozen_pkgs
[stem
].version
3114 if f_ver
== ver
or \
3115 pkg
.version
.Version(ver
3116 ).is_successor(f_ver
,
3118 pkg
.version
.CONSTRAINT_AUTO
):
3120 pkgdefs
.PKG_STATE_FROZEN
)
3122 # Determine if package is obsolete or has been
3123 # renamed and mark with appropriate state.
3126 dpent
= dp
.get_entry(pub
=pub
, stem
=stem
,
3128 if dpent
is not None:
3129 for a
in dpent
["actions"]:
3130 # Constructing action objects
3131 # for every action would be a
3132 # lot slower, so a simple string
3133 # match is done first so that
3134 # only interesting actions get
3136 if not a
.startswith("set"):
3138 if not ("pkg.obsolete" in a
or \
3139 "pkg.renamed" in a
):
3143 act
= pkg
.actions
.fromstr(a
)
3144 except pkg
.actions
.ActionError
:
3145 # If the action can't be
3146 # parsed or is not yet
3147 # supported, continue.
3150 if act
.attrs
["value"].lower() != "true":
3153 if act
.attrs
["name"] == "pkg.obsolete":
3155 pkgdefs
.PKG_STATE_OBSOLETE
)
3156 elif act
.attrs
["name"] == "pkg.renamed":
3157 if not act
.include_this(
3158 excludes
, publisher
=pub
):
3161 pkgdefs
.PKG_STATE_RENAMED
)
3163 mdata
["states"] = states
3166 nkpart
.add(metadata
=entry
, op_time
=op_time
,
3167 pub
=pub
, stem
=stem
, ver
=ver
)
3169 nipart
.add(metadata
=entry
,
3170 op_time
=op_time
, pub
=pub
, stem
=stem
,
3173 # Now add installed packages to list of known packages using
3174 # previous state information. While doing so, track any
3175 # new entries as the versions for the stem of the entry will
3176 # need to be passed to finalize() for sorting.
3178 for name
in old_icat
.parts
:
3179 # Old installed part.
3180 ipart
= old_icat
.get_part(name
, must_exist
=True)
3183 nkpart
= kcat
.get_part(name
)
3185 # New installed part.
3186 nipart
= icat
.get_part(name
)
3188 base
= name
.startswith("catalog.base.")
3191 for t
, entry
in ipart
.tuple_entries():
3194 if pub
not in inst_stems
or \
3195 stem
not in inst_stems
[pub
] or \
3196 ver
not in inst_stems
[pub
][stem
] or \
3197 inst_stems
[pub
][stem
][ver
]:
3198 # Entry is no longer valid or is already
3203 mdata
= entry
["metadata"]
3204 states
= set(mdata
["states"])
3205 states
.discard(pkgdefs
.PKG_STATE_KNOWN
)
3207 nver
, snver
= newest
.get(stem
, (None,
3210 (snver
is not None and ver
== snver
):
3212 pkgdefs
.PKG_STATE_UPGRADABLE
)
3213 elif snver
is not None:
3215 pkgdefs
.PKG_STATE_UPGRADABLE
)
3216 mdata
["states"] = list(states
)
3219 nkpart
.add(metadata
=entry
, op_time
=op_time
,
3220 pub
=pub
, stem
=stem
, ver
=ver
)
3221 nipart
.add(metadata
=entry
, op_time
=op_time
,
3222 pub
=pub
, stem
=stem
, ver
=ver
)
3223 final_fmris
.append(pkg
.fmri
.PkgFmri(name
=stem
,
3224 publisher
=pub
, version
=ver
))
3226 # Save the new catalogs.
3227 for cat
in kcat
, icat
:
3228 misc
.makedirs(cat
.meta_root
)
3229 cat
.finalize(pfmris
=final_fmris
)
3232 # Next, preserve the old installed state dir, rename the
3233 # new one into place, and then remove the old one.
3234 orig_state_root
= self
.salvage(self
._statedir
, full_path
=True)
3235 portable
.rename(tmp_state_root
, self
._statedir
)
3236 shutil
.rmtree(orig_state_root
, True)
3238 # Ensure in-memory catalogs get reloaded.
3239 self
.__init
_catalogs
()
3241 progtrack
.cache_catalogs_done()
3242 self
.history
.log_operation_end()
3244 def refresh_publishers(self
, full_refresh
=False, immediate
=False,
3245 pubs
=None, progtrack
=None):
3246 """Refreshes the metadata (e.g. catalog) for one or more
3247 publishers. Callers are responsible for locking the image.
3249 'full_refresh' is an optional boolean value indicating whether
3250 a full retrieval of publisher metadata (e.g. catalogs) or only
3251 an update to the existing metadata should be performed. When
3252 True, 'immediate' is also set to True.
3254 'immediate' is an optional boolean value indicating whether the
3255 a refresh should occur now. If False, a publisher's selected
3256 repository will only be checked for updates if the update
3257 interval period recorded in the image configuration has been
3260 'pubs' is a list of publisher prefixes or publisher objects
3261 to refresh. Passing an empty list or using the default value
3262 implies all publishers."""
3264 if self
.version
< 3:
3265 raise apx
.ImageFormatUpdateNeeded(self
.root
)
3268 progtrack
= progress
.NullProgressTracker()
3270 be_name
, be_uuid
= bootenv
.BootEnv
.get_be_name(self
.root
)
3271 self
.history
.log_operation_start("refresh-publishers",
3272 be_name
=be_name
, be_uuid
=be_uuid
)
3274 pubs_to_refresh
= []
3277 # Omit disabled publishers.
3278 pubs
= [p
for p
in self
.gen_publishers()]
3281 self
.__rebuild
_image
_catalogs
(progtrack
=progtrack
)
3286 if not isinstance(p
, publisher
.Publisher
):
3287 p
= self
.get_publisher(prefix
=p
)
3289 e
= apx
.DisabledPublisher(p
)
3290 self
.history
.log_operation_end(error
=e
)
3292 pubs_to_refresh
.append(p
)
3294 if not pubs_to_refresh
:
3295 self
.history
.log_operation_end(
3296 result
=history
.RESULT_NOTHING_TO_DO
)
3299 # Verify validity of certificates before attempting network
3302 self
.check_cert_validity(pubs
=pubs_to_refresh
)
3303 except apx
.ExpiringCertificate
as e
:
3304 logger
.error(str(e
))
3307 # Ensure Image directory structure is valid.
3309 except Exception as e
:
3310 self
.history
.log_operation_end(error
=e
)
3313 progtrack
.refresh_start(len(pubs_to_refresh
),
3314 full_refresh
=full_refresh
)
3319 updated
= self
.__start
_state
_update
()
3320 for pub
in pubs_to_refresh
:
3322 progtrack
.refresh_start_pub(pub
)
3324 if pub
.refresh(full_refresh
=full_refresh
,
3325 immediate
=immediate
, progtrack
=progtrack
):
3327 except apx
.PermissionsException
as e
:
3328 failed
.append((pub
, e
))
3329 # No point in continuing since no data can
3332 except apx
.ApiException
as e
:
3333 failed
.append((pub
, e
))
3336 progtrack
.refresh_end_pub(pub
)
3337 succeeded
.add(pub
.prefix
)
3339 progtrack
.refresh_done()
3342 self
.__rebuild
_image
_catalogs
(progtrack
=progtrack
)
3344 self
.__end
_state
_update
()
3347 e
= apx
.CatalogRefreshException(failed
, total
,
3349 self
.history
.log_operation_end(error
=e
)
3353 self
.history
.log_operation_end(
3354 result
=history
.RESULT_NOTHING_TO_DO
)
3356 self
.history
.log_operation_end()
3358 def _get_publisher_meta_dir(self
):
3359 if self
.version
>= 3:
3363 def _get_publisher_cache_root(self
, prefix
):
3364 return os
.path
.join(self
.imgdir
, "cache", "publisher", prefix
)
3366 def _get_publisher_meta_root(self
, prefix
):
3367 return os
.path
.join(self
.imgdir
, self
._get
_publisher
_meta
_dir
(),
3370 def remove_publisher_metadata(self
, pub
, progtrack
=None, rebuild
=True):
3371 """Removes the metadata for the specified publisher object,
3372 except data for installed packages.
3374 'pub' is the object of the publisher to remove the data for.
3376 'progtrack' is an optional ProgressTracker object.
3378 'rebuild' is an optional boolean specifying whether image
3379 catalogs should be rebuilt after removing the publisher's
3383 if self
.version
< 4:
3384 # Older images don't require fine-grained deletion.
3385 pub
.remove_meta_root()
3387 self
.__rebuild
_image
_catalogs
(
3388 progtrack
=progtrack
)
3391 # Build a list of paths that shouldn't be removed because they
3392 # belong to installed packages.
3394 self
.get_manifest_path(f
)
3395 for f
in self
.gen_installed_pkgs()
3396 if f
.publisher
== pub
.prefix
3400 pub
.remove_meta_root()
3403 # Discard all publisher metadata except
3404 # package manifests as a first pass.
3405 for entry
in os
.listdir(pub
.meta_root
):
3409 target
= os
.path
.join(pub
.meta_root
,
3411 if os
.path
.isdir(target
):
3412 shutil
.rmtree(target
,
3415 portable
.remove(target
)
3417 # Build the list of directories that can't be
3419 exdirs
= [os
.path
.dirname(e
) for e
in excluded
]
3421 # Now try to discard only package manifests
3422 # that aren't for installed packages.
3423 mroot
= os
.path
.join(pub
.meta_root
, "pkg")
3424 for pdir
in os
.listdir(mroot
):
3425 proot
= os
.path
.join(mroot
, pdir
)
3426 if proot
not in exdirs
:
3427 # This removes all manifest data
3428 # for a given package stem.
3429 shutil
.rmtree(proot
,
3433 # Remove only manifest data for packages
3434 # that are not installed.
3435 for mname
in os
.listdir(proot
):
3436 mpath
= os
.path
.join(proot
,
3438 if mpath
not in excluded
:
3439 portable
.remove(mpath
)
3441 # Finally, dump any cache data for this
3442 # publisher if possible.
3443 shutil
.rmtree(self
._get
_publisher
_cache
_root
(
3444 pub
.prefix
), ignore_errors
=True)
3445 except EnvironmentError as e
:
3446 if e
.errno
!= errno
.ENOENT
:
3447 raise apx
._convert
_error
(e
)
3450 self
.__rebuild
_image
_catalogs
(progtrack
=progtrack
)
3452 def gen_installed_pkg_names(self
, anarchy
=True):
3453 """A generator function that produces FMRI strings as it
3454 iterates over the list of installed packages. This is
3455 faster than gen_installed_pkgs when only the FMRI string
3458 cat
= self
.get_catalog(self
.IMG_CATALOG_INSTALLED
)
3459 for f
in cat
.fmris(objects
=False):
3461 # Catalog entries always have publisher prefix.
3462 yield "pkg:/{0}".format(f
[6:].split("/", 1)[-1])
3466 def gen_installed_pkgs(self
, pubs
=EmptyI
, ordered
=False):
3467 """Return an iteration through the installed packages."""
3469 cat
= self
.get_catalog(self
.IMG_CATALOG_INSTALLED
)
3470 for f
in cat
.fmris(pubs
=pubs
, ordered
=ordered
):
3473 def count_installed_pkgs(self
, pubs
=EmptyI
):
3474 """Return the number of installed packages."""
3475 cat
= self
.get_catalog(self
.IMG_CATALOG_INSTALLED
)
3476 assert cat
.package_count
== cat
.package_version_count
3479 for (pub
, pkg_count
, _ignored
) in
3480 cat
.get_package_counts_by_pub(pubs
=pubs
)
3483 def gen_tracked_stems(self
):
3484 """Return an iteration through all the tracked pkg stems
3485 in the set of currently installed packages. Return value
3486 is group pkg fmri, stem"""
3487 cat
= self
.get_catalog(self
.IMG_CATALOG_INSTALLED
)
3488 excludes
= self
.list_excludes()
3490 for f
in cat
.fmris():
3491 for a
in cat
.get_entry_actions(f
,
3492 [pkg
.catalog
.Catalog
.DEPENDENCY
], excludes
=excludes
):
3493 if a
.name
== "depend" and a
.attrs
["type"] == "group":
3494 yield (f
, self
.strtofmri(
3495 a
.attrs
["fmri"]).pkg_name
)
3497 def _create_fast_lookups(self
, progtrack
=None):
3498 """Create an on-disk database mapping action name and key
3499 attribute value to the action string comprising the unique
3500 attributes of the action, for all installed actions. This is
3501 done with a file mapping the tuple to an offset into a second
3502 file, where those actions are kept. Once the offsets are loaded
3503 into memory, it is simple to seek into the second file to the
3504 given offset and read until you hit an action that doesn't
3508 progtrack
= progress
.NullProgressTracker()
3510 self
.__actdict
= None
3511 self
.__actdict
_timestamp
= None
3512 stripped_path
= os
.path
.join(self
.__action
_cache
_dir
,
3514 offsets_path
= os
.path
.join(self
.__action
_cache
_dir
,
3516 conflicting_keys_path
= os
.path
.join(self
.__action
_cache
_dir
,
3519 excludes
= self
.list_excludes()
3522 # nsd is the "name-space dictionary." It maps action name
3523 # spaces (see action.generic for more information) to
3524 # dictionaries which map keys to pairs which contain an action
3525 # with that key and the pfmri of the package which delivered the
3529 from heapq
import heappush
, heappop
3531 progtrack
.job_start(progtrack
.JOB_FAST_LOOKUP
)
3533 for pfmri
in self
.gen_installed_pkgs():
3534 progtrack
.job_add_progress(progtrack
.JOB_FAST_LOOKUP
)
3535 m
= self
.get_manifest(pfmri
, ignore_excludes
=True)
3536 for act
in m
.gen_actions(excludes
=excludes
):
3537 if not act
.globally_identical
:
3540 heappush(heap
, (act
.name
,
3541 act
.attrs
[act
.key_attr
], pfmri
, act
))
3542 nsd
.setdefault(act
.namespace_group
, {})
3543 nsd
[act
.namespace_group
].setdefault(
3544 act
.attrs
[act
.key_attr
], [])
3545 nsd
[act
.namespace_group
][
3546 act
.attrs
[act
.key_attr
]].append((
3549 progtrack
.job_add_progress(progtrack
.JOB_FAST_LOOKUP
)
3551 # If we can't write the temporary files, then there's no point
3552 # in producing actdict because it depends on a synchronized
3553 # stripped actions file.
3556 sf
, sp
= self
.temporary_file(close
=False)
3557 of
, op
= self
.temporary_file(close
=False)
3558 bf
, bp
= self
.temporary_file(close
=False)
3560 sf
= os
.fdopen(sf
, "wb")
3561 of
= os
.fdopen(of
, "wb")
3562 bf
= os
.fdopen(bf
, "wb")
3564 # We need to make sure the files are coordinated.
3565 timestamp
= int(time
.time())
3566 sf
.write("VERSION 1\n{0}\n".format(timestamp
))
3567 of
.write("VERSION 2\n{0}\n".format(timestamp
))
3568 # The conflicting keys file doesn't need a timestamp
3569 # because it's not coordinated with the stripped or
3570 # offsets files and the result of loading it isn't
3571 # reused by this class.
3572 bf
.write("VERSION 1\n")
3574 last_name
, last_key
, last_offset
= None, None, sf
.tell()
3577 # This is a tight loop, so try to avoid burning
3578 # CPU calling into the progress tracker
3580 if len(heap
) % 100 == 0:
3581 progtrack
.job_add_progress(
3582 progtrack
.JOB_FAST_LOOKUP
)
3583 item
= heappop(heap
)
3584 fmri
, act
= item
[2:]
3585 key
= act
.attrs
[act
.key_attr
]
3586 if act
.name
!= last_name
or key
!= last_key
:
3587 if last_name
is None:
3588 assert last_key
is None
3590 last_name
= act
.name
3594 of
.write("{0} {1} {2} {3}\n".format(
3595 last_name
, last_offset
,
3597 actdict
[(last_name
, last_key
)] = last_offset
, cnt
3598 last_name
, last_key
, last_offset
= \
3599 act
.name
, key
, sf
.tell()
3603 sf
.write("{0} {1}\n".format(fmri
, act
))
3604 if last_name
is not None:
3605 assert last_key
is not None
3606 assert last_offset
is not None
3608 of
.write("{0} {1} {2} {3}\n".format(
3609 last_name
, last_offset
, cnt
, last_key
))
3610 actdict
[(last_name
, last_key
)] = \
3613 progtrack
.job_add_progress(progtrack
.JOB_FAST_LOOKUP
)
3615 bad_keys
= imageplan
.ImagePlan
._check
_actions
(nsd
)
3616 for k
in sorted(bad_keys
):
3617 bf
.write("{0}\n".format(k
))
3619 progtrack
.job_add_progress(progtrack
.JOB_FAST_LOOKUP
)
3623 os
.chmod(sp
, misc
.PKG_FILE_MODE
)
3624 os
.chmod(op
, misc
.PKG_FILE_MODE
)
3625 os
.chmod(bp
, misc
.PKG_FILE_MODE
)
3626 except BaseException
as e
:
3635 progtrack
.job_add_progress(progtrack
.JOB_FAST_LOOKUP
)
3637 # Finally, rename the temporary files into their final place.
3638 # If we have any problems, do our best to remove them, and we'll
3639 # try to recreate them on the read-side.
3641 if not os
.path
.exists(self
.__action
_cache
_dir
):
3642 os
.makedirs(self
.__action
_cache
_dir
)
3643 portable
.rename(sp
, stripped_path
)
3644 portable
.rename(op
, offsets_path
)
3645 portable
.rename(bp
, conflicting_keys_path
)
3646 except EnvironmentError as e
:
3647 if e
.errno
== errno
.EACCES
or e
.errno
== errno
.EROFS
:
3648 self
.__action
_cache
_dir
= self
.temporary_dir()
3649 stripped_path
= os
.path
.join(
3650 self
.__action
_cache
_dir
, "actions.stripped")
3651 offsets_path
= os
.path
.join(
3652 self
.__action
_cache
_dir
, "actions.offsets")
3653 conflicting_keys_path
= os
.path
.join(
3654 self
.__action
_cache
_dir
, "keys.conflicting")
3655 portable
.rename(sp
, stripped_path
)
3656 portable
.rename(op
, offsets_path
)
3657 portable
.rename(bp
, conflicting_keys_path
)
3659 exc_info
= sys
.exc_info()
3661 os
.unlink(stripped_path
)
3662 os
.unlink(offsets_path
)
3663 os
.unlink(conflicting_keys_path
)
3666 raise exc_info
[0], exc_info
[1], exc_info
[2]
3668 progtrack
.job_add_progress(progtrack
.JOB_FAST_LOOKUP
)
3669 progtrack
.job_done(progtrack
.JOB_FAST_LOOKUP
)
3670 return actdict
, timestamp
3672 def _remove_fast_lookups(self
):
3673 """Remove on-disk database created by _create_fast_lookups.
3674 Should be called before updating image state to prevent the
3675 client from seeing stale state if _create_fast_lookups is
3678 for fname
in ("actions.stripped", "actions.offsets",
3679 "keys.conflicting"):
3681 portable
.remove(os
.path
.join(
3682 self
.__action
_cache
_dir
, fname
))
3683 except EnvironmentError as e
:
3684 if e
.errno
== errno
.ENOENT
:
3686 raise apx
._convert
_error
(e
)
3688 def _load_actdict(self
, progtrack
):
3689 """Read the file of offsets created in _create_fast_lookups()
3690 and return the dictionary mapping action name and key value to
3694 of
= open(os
.path
.join(self
.__action
_cache
_dir
,
3695 "actions.offsets"), "rb")
3696 except IOError as e
:
3697 if e
.errno
!= errno
.ENOENT
:
3699 actdict
, otimestamp
= self
._create
_fast
_lookups
()
3700 assert actdict
is not None
3701 self
.__actdict
= actdict
3702 self
.__actdict
_timestamp
= otimestamp
3705 # Make sure the files are paired, and try to create them if not.
3706 oversion
= of
.readline().rstrip()
3707 otimestamp
= of
.readline().rstrip()
3709 # The original action.offsets file existed and had the same
3710 # timestamp as the stored actdict, so that actdict can be
3712 if self
.__actdict
and otimestamp
== self
.__actdict
_timestamp
:
3713 return self
.__actdict
3715 sversion
, stimestamp
= self
._get
_stripped
_actions
_file
(
3718 # If we recognize neither file's version or their timestamps
3719 # don't match, then we blow them away and try again.
3720 if oversion
!= "VERSION 2" or sversion
!= "VERSION 1" or \
3721 stimestamp
!= otimestamp
:
3723 actdict
, otimestamp
= self
._create
_fast
_lookups
()
3724 assert actdict
is not None
3725 self
.__actdict
= actdict
3726 self
.__actdict
_timestamp
= otimestamp
3729 # At this point, the original actions.offsets file existed, no
3730 # actdict was saved in the image, the versions matched what was
3731 # expected, and the timestamps of the actions.offsets and
3732 # actions.stripped files matched, so the actions.offsets file is
3733 # parsed to generate actdict.
3737 actname
, offset
, cnt
, key_attr
= \
3738 line
.rstrip().split(None, 3)
3740 actdict
[(actname
, key_attr
)] = (off
, int(cnt
))
3742 # This is a tight loop, so try to avoid burning
3743 # CPU calling into the progress tracker excessively.
3744 # Since we are already using the offset, we use that
3745 # to damp calls back into the progress tracker.
3747 progtrack
.plan_add_progress(
3748 progtrack
.PLAN_ACTION_CONFLICT
)
3751 self
.__actdict
= actdict
3752 self
.__actdict
_timestamp
= otimestamp
3755 def _get_stripped_actions_file(self
, internal
=False):
3756 """Open the actions file described in _create_fast_lookups() and
3757 return the corresponding file object."""
3759 sf
= file(os
.path
.join(self
.__action
_cache
_dir
,
3760 "actions.stripped"), "rb")
3761 sversion
= sf
.readline().rstrip()
3762 stimestamp
= sf
.readline().rstrip()
3765 return sversion
, stimestamp
3769 def _load_conflicting_keys(self
):
3770 """Load the list of keys which have conflicting actions in the
3771 existing image. If no such list exists, then return None."""
3773 pth
= os
.path
.join(self
.__action
_cache
_dir
, "keys.conflicting")
3775 with
open(pth
, "rb") as fh
:
3776 version
= fh
.readline().rstrip()
3777 if version
!= "VERSION 1":
3779 return set(l
.rstrip() for l
in fh
)
3780 except EnvironmentError as e
:
3781 if e
.errno
== errno
.ENOENT
:
3785 def gen_installed_actions_bytype(self
, atype
, implicit_dirs
=False):
3786 """Iterates through the installed actions of type 'atype'. If
3787 'implicit_dirs' is True and 'atype' is 'dir', then include
3788 directories only implicitly defined by other filesystem
3791 if implicit_dirs
and atype
!= "dir":
3792 implicit_dirs
= False
3794 excludes
= self
.list_excludes()
3796 for pfmri
in self
.gen_installed_pkgs():
3797 m
= self
.get_manifest(pfmri
)
3799 for act
in m
.gen_actions_by_type(atype
,
3802 dirs
.add(act
.attrs
["path"])
3805 da
= pkg
.actions
.directory
.DirectoryAction
3806 for d
in m
.get_directories(excludes
):
3808 yield da(path
=d
, implicit
="true"), pfmri
3810 def get_installed_pubs(self
):
3811 """Returns a set containing the prefixes of all publishers with
3812 installed packages."""
3814 cat
= self
.get_catalog(self
.IMG_CATALOG_INSTALLED
)
3815 return cat
.publishers()
3817 def strtofmri(self
, myfmri
):
3818 return pkg
.fmri
.PkgFmri(myfmri
)
3820 def strtomatchingfmri(self
, myfmri
):
3821 return pkg
.fmri
.MatchingPkgFmri(myfmri
)
3823 def get_user_by_name(self
, name
):
3824 uid
= self
._usersbyname
.get(name
, None)
3827 return portable
.get_user_by_name(name
, self
.root
,
3828 self
.type != IMG_USER
)
3830 def get_name_by_uid(self
, uid
, returnuid
= False):
3831 # XXX What to do about IMG_PARTIAL?
3833 return portable
.get_name_by_uid(uid
, self
.root
,
3834 self
.type != IMG_USER
)
3841 def get_group_by_name(self
, name
):
3842 gid
= self
._groupsbyname
.get(name
, None)
3845 return portable
.get_group_by_name(name
, self
.root
,
3846 self
.type != IMG_USER
)
3848 def get_name_by_gid(self
, gid
, returngid
= False):
3850 return portable
.get_name_by_gid(gid
, self
.root
,
3851 self
.type != IMG_USER
)
3858 def update_index_dir(self
, postfix
="index"):
3859 """Since the index directory will not reliably be updated when
3860 the image root is, this should be called prior to using the
3863 if self
.version
== self
.CURRENT_VERSION
:
3864 self
.index_dir
= os
.path
.join(self
.imgdir
, "cache",
3867 self
.index_dir
= os
.path
.join(self
.imgdir
, postfix
)
3869 def cleanup_downloads(self
):
3870 """Clean up any downloads that were in progress but that
3871 did not successfully finish."""
3873 shutil
.rmtree(self
._incoming
_cache
_dir
, True)
3875 def cleanup_cached_content(self
, progtrack
=None):
3876 """Delete the directory that stores all of our cached
3877 downloaded content. This may take a while for a large
3878 directory hierarchy. Don't clean up caches if the
3879 user overrode the underlying setting using PKG_CACHEDIR or
3882 if not self
.cfg
.get_policy(imageconfig
.FLUSH_CONTENT_CACHE
):
3886 for path
, readonly
, pub
, layout
in self
.get_cachedirs():
3887 if readonly
or (self
.__user
_cache
_dir
and
3888 path
.startswith(self
.__user
_cache
_dir
)):
3896 progtrack
= progress
.NullProgressTracker()
3898 # 'Updating package cache'
3899 progtrack
.job_start(progtrack
.JOB_PKG_CACHE
, goal
=len(cdirs
))
3901 shutil
.rmtree(path
, True)
3902 progtrack
.job_add_progress(progtrack
.JOB_PKG_CACHE
)
3903 progtrack
.job_done(progtrack
.JOB_PKG_CACHE
)
3905 def salvage(self
, path
, full_path
=False):
3906 """Called when unexpected file or directory is found during
3907 package operations; returns the path of the salvage
3908 directory where the item was stored. Can be called with
3909 either image-relative or absolute (current) path to file/dir
3910 to be salvaged. If full_path is False (the default), remove
3911 the current mountpoint of the image from the returned
3914 # This ensures that if the path is already rooted in the image,
3915 # that it will be stored in lost+found (due to os.path.join
3916 # behaviour with absolute path components).
3917 if path
.startswith(self
.root
):
3918 path
= path
.replace(self
.root
, "", 1)
3920 if os
.path
.isabs(path
):
3921 # If for some reason the path wasn't rooted in the
3922 # image, but it is an absolute one, then strip the
3923 # absolute part so that it will be stored in lost+found
3924 # (due to os.path.join behaviour with absolute path
3926 path
= os
.path
.splitdrive(path
)[-1].lstrip(os
.path
.sep
)
3928 sdir
= os
.path
.normpath(
3929 os
.path
.join(self
.imgdir
, "lost+found",
3930 path
+ "-" + time
.strftime("%Y%m%dT%H%M%SZ")))
3932 parent
= os
.path
.dirname(sdir
)
3933 if not os
.path
.exists(parent
):
3934 misc
.makedirs(parent
)
3936 orig
= os
.path
.normpath(os
.path
.join(self
.root
, path
))
3938 misc
.move(orig
, sdir
)
3939 # remove current mountpoint from sdir
3941 sdir
.replace(self
.root
, "", 1)
3944 def recover(self
, local_spath
, full_dest_path
):
3945 """Called when recovering directory contents to implement
3946 "salvage-from" directive... full_dest_path must exist."""
3947 source_path
= os
.path
.normpath(os
.path
.join(self
.root
, local_spath
))
3948 for file_name
in os
.listdir(source_path
):
3949 misc
.move(os
.path
.join(source_path
, file_name
),
3950 os
.path
.join(full_dest_path
, file_name
))
3952 def temporary_dir(self
):
3953 """Create a temp directory under the image directory for various
3954 purposes. If the process is unable to create a directory in the
3955 image's temporary directory, a replacement location is found."""
3958 misc
.makedirs(self
.__tmpdir
)
3959 except (apx
.PermissionsException
,
3960 apx
.ReadOnlyFileSystemException
):
3961 self
.__tmpdir
= tempfile
.mkdtemp(prefix
="pkg5tmp-")
3962 atexit
.register(shutil
.rmtree
,
3963 self
.__tmpdir
, ignore_errors
=True)
3964 return self
.temporary_dir()
3967 rval
= tempfile
.mkdtemp(dir=self
.__tmpdir
)
3969 # Force standard mode.
3970 os
.chmod(rval
, misc
.PKG_DIR_MODE
)
3972 except EnvironmentError as e
:
3973 if e
.errno
== errno
.EACCES
or e
.errno
== errno
.EROFS
:
3974 self
.__tmpdir
= tempfile
.mkdtemp(prefix
="pkg5tmp-")
3975 atexit
.register(shutil
.rmtree
,
3976 self
.__tmpdir
, ignore_errors
=True)
3977 return self
.temporary_dir()
3978 raise apx
._convert
_error
(e
)
3980 def temporary_file(self
, close
=True):
3981 """Create a temporary file under the image directory for various
3982 purposes. If 'close' is True, close the file descriptor;
3983 otherwise leave it open. If the process is unable to create a
3984 file in the image's temporary directory, a replacement is
3988 misc
.makedirs(self
.__tmpdir
)
3989 except (apx
.PermissionsException
,
3990 apx
.ReadOnlyFileSystemException
):
3991 self
.__tmpdir
= tempfile
.mkdtemp(prefix
="pkg5tmp-")
3992 atexit
.register(shutil
.rmtree
,
3993 self
.__tmpdir
, ignore_errors
=True)
3994 return self
.temporary_file(close
=close
)
3997 fd
, name
= tempfile
.mkstemp(dir=self
.__tmpdir
)
4000 except EnvironmentError as e
:
4001 if e
.errno
== errno
.EACCES
or e
.errno
== errno
.EROFS
:
4002 self
.__tmpdir
= tempfile
.mkdtemp(prefix
="pkg5tmp-")
4003 atexit
.register(shutil
.rmtree
,
4004 self
.__tmpdir
, ignore_errors
=True)
4005 return self
.temporary_file(close
=close
)
4006 raise apx
._convert
_error
(e
)
4013 def __filter_install_matches(self
, matches
):
4014 """Attempts to eliminate redundant matches found during
4015 packaging operations:
4017 * First, stems of installed packages for publishers that
4018 are now unknown (no longer present in the image
4019 configuration) are dropped.
4021 * Second, if multiple matches are still present, stems of
4022 of installed packages, that are not presently in the
4023 corresponding publisher's catalog, are dropped.
4025 * Finally, if multiple matches are still present, all
4026 stems except for those in state PKG_STATE_INSTALLED are
4029 Returns a list of the filtered matches, along with a dict of
4030 their unique names."""
4035 # First eliminate any duplicate matches that are for unknown
4036 # publishers (publishers which have been removed from the image
4038 publist
= set(p
.prefix
for p
in self
.get_publishers().values())
4039 for m
, st
in matches
:
4040 if m
.publisher
in publist
:
4041 onames
.add(m
.get_pkg_stem())
4042 olist
.append((m
, st
))
4044 # Next, if there are still multiple matches, eliminate matches
4045 # belonging to publishers that no longer have the FMRI in their
4052 if not st
["in_catalog"]:
4054 if st
["state"] == pkgdefs
.PKG_STATE_INSTALLED
:
4056 mnames
.add(m
.get_pkg_stem())
4057 mlist
.append((m
, st
))
4061 # Finally, if there are still multiple matches, and a known
4062 # stem is installed, then eliminate any stems that do not
4063 # have an installed version.
4064 if found_state
and len(onames
) > 1:
4068 if st
["state"] == pkgdefs
.PKG_STATE_INSTALLED
:
4069 mnames
.add(m
.get_pkg_stem())
4070 mlist
.append((m
, st
))
4074 return olist
, onames
4076 def avoid_pkgs(self
, pat_list
, progtrack
, check_cancel
):
4077 """Avoid the specified packages... use pattern matching on
4078 names; ignore versions."""
4080 with self
.locked_op("avoid"):
4081 ip
= imageplan
.ImagePlan
4082 self
._avoid
_set
_save
(self
.avoid_set_get() |
4083 set(ip
.match_user_stems(self
, pat_list
,
4084 ip
.MATCH_UNINSTALLED
)))
4086 def unavoid_pkgs(self
, pat_list
, progtrack
, check_cancel
):
4087 """Unavoid the specified packages... use pattern matching on
4088 names; ignore versions."""
4090 with self
.locked_op("unavoid"):
4091 ip
= imageplan
.ImagePlan
4092 unavoid_set
= set(ip
.match_user_stems(self
, pat_list
,
4094 current_set
= self
.avoid_set_get()
4095 not_avoided
= unavoid_set
- current_set
4097 raise apx
.PlanCreationException(not_avoided
=not_avoided
)
4099 # Don't allow unavoid if removal of the package from the
4100 # avoid list would require the package to be installed
4101 # as this would invalidate current image state. If the
4102 # package is already installed though, it doesn't really
4103 # matter if it's a target of an avoid or not.
4104 installed_set
= set([
4106 for f
in self
.gen_installed_pkgs()
4111 for f
, a
in self
.gen_tracked_stems()
4112 if a
in unavoid_set
and a
not in installed_set
4116 raise apx
.PlanCreationException(would_install
=would_install
)
4118 self
._avoid
_set
_save
(current_set
- unavoid_set
)
4120 def get_avoid_dict(self
):
4121 """ return dict of lists (avoided stem, pkgs w/ group
4122 dependencies on this pkg)"""
4123 ret
= dict((a
, list()) for a
in self
.avoid_set_get())
4124 for fmri
, group
in self
.gen_tracked_stems():
4126 ret
[group
].append(fmri
.pkg_name
)
4129 def freeze_pkgs(self
, pat_list
, progtrack
, check_cancel
, dry_run
,
4131 """Freeze the specified packages... use pattern matching on
4134 The 'pat_list' parameter contains the list of patterns of
4137 The 'progtrack' parameter contains the progress tracker for this
4140 The 'check_cancel' parameter contains a function to call to
4141 check if the operation has been canceled.
4143 The 'dry_run' parameter controls whether packages are actually
4146 The 'comment' parameter contains the comment, if any, which will
4147 be associated with the packages that are frozen.
4150 def __make_publisherless_fmri(pat
):
4151 p
= pkg
.fmri
.MatchingPkgFmri(pat
)
4155 def __calc_frozen():
4156 stems_and_pats
= imageplan
.ImagePlan
.freeze_pkgs_match(
4158 return dict([(s
, __make_publisherless_fmri(p
))
4159 for s
, p
in stems_and_pats
.iteritems()])
4161 return __calc_frozen().values()
4162 with self
.locked_op("freeze"):
4163 stems_and_pats
= __calc_frozen()
4164 # Get existing dictionary of frozen packages.
4165 d
= self
.__freeze
_dict
_load
()
4166 # Update the dictionary with the new freezes and
4168 timestamp
= calendar
.timegm(time
.gmtime())
4169 d
.update([(s
, (str(p
), comment
, timestamp
))
4170 for s
, p
in stems_and_pats
.iteritems()])
4171 self
._freeze
_dict
_save
(d
)
4172 return stems_and_pats
.values()
4174 def unfreeze_pkgs(self
, pat_list
, progtrack
, check_cancel
, dry_run
):
4175 """Unfreeze the specified packages... use pattern matching on
4176 names; ignore versions.
4178 The 'pat_list' parameter contains the list of patterns of
4181 The 'progtrack' parameter contains the progress tracker for this
4184 The 'check_cancel' parameter contains a function to call to
4185 check if the operation has been canceled.
4187 The 'dry_run' parameter controls whether packages are actually
4190 def __calc_unfrozen():
4191 # Get existing dictionary of frozen packages.
4192 d
= self
.__freeze
_dict
_load
()
4193 # Match the user's patterns against the frozen packages
4194 # and return the stems which matched, and the dictionary
4195 # of the currently frozen packages.
4196 ip
= imageplan
.ImagePlan
4197 return set(ip
.match_user_stems(self
, pat_list
,
4198 ip
.MATCH_ALL
, raise_unmatched
=False,
4199 universe
=[(None, k
) for k
in d
.keys()])), d
4202 return __calc_unfrozen()[0]
4203 with self
.locked_op("freeze"):
4204 unfrozen_set
, d
= __calc_unfrozen()
4205 # Remove the specified packages from the frozen set.
4206 for n
in unfrozen_set
:
4208 self
._freeze
_dict
_save
(d
)
4211 def __call_imageplan_evaluate(self
, ip
):
4212 # A plan can be requested without actually performing an
4213 # operation on the image.
4214 if self
.history
.operation_name
:
4215 self
.history
.operation_start_state
= ip
.get_plan()
4219 except apx
.ConflictingActionErrors
:
4220 # Image plan evaluation can fail because of duplicate
4221 # action discovery, but we still want to be able to
4222 # display and log the solved FMRI changes.
4224 if self
.history
.operation_name
:
4225 self
.history
.operation_end_state
= \
4226 "Unevaluated: merged plan had errors\n" + \
4227 ip
.get_plan(full
=False)
4232 if self
.history
.operation_name
:
4233 self
.history
.operation_end_state
= \
4234 ip
.get_plan(full
=False)
4236 def __make_plan_common(self
, _op
, _progtrack
, _check_cancel
,
4237 _noexecute
, _ip_noop
=False, **kwargs
):
4238 """Private helper function to perform base plan creation and
4242 if DebugValues
.get_value("simulate-plan-hang"):
4243 # If pkg5.hang file is present in image dir, then
4244 # sleep after loading configuration until file is
4245 # gone. This is used by the test suite for signal
4246 # handling testing, etc.
4247 hang_file
= os
.path
.join(self
.imgdir
, "pkg5.hang")
4248 with
open(hang_file
, "w") as f
:
4249 f
.write(str(os
.getpid()))
4251 while os
.path
.exists(hang_file
):
4254 # Allow garbage collection of previous plan.
4255 self
.imageplan
= None
4257 ip
= imageplan
.ImagePlan(self
, _op
, _progtrack
, _check_cancel
,
4258 noexecute
=_noexecute
)
4260 # Always start with most current (on-disk) state information.
4261 self
.__init
_catalogs
()
4266 ip
.plan_noop(**kwargs
)
4268 pkgdefs
.API_OP_ATTACH
,
4269 pkgdefs
.API_OP_DETACH
,
4270 pkgdefs
.API_OP_SYNC
]:
4271 ip
.plan_sync(**kwargs
)
4273 pkgdefs
.API_OP_CHANGE_FACET
,
4274 pkgdefs
.API_OP_CHANGE_VARIANT
]:
4275 ip
.plan_change_varcets(**kwargs
)
4276 elif _op
== pkgdefs
.API_OP_DEHYDRATE
:
4277 ip
.plan_dehydrate(**kwargs
)
4278 elif _op
== pkgdefs
.API_OP_INSTALL
:
4279 ip
.plan_install(**kwargs
)
4280 elif _op
==pkgdefs
.API_OP_EXACT_INSTALL
:
4281 ip
.plan_exact_install(**kwargs
)
4282 elif _op
== pkgdefs
.API_OP_FIX
:
4283 ip
.plan_fix(**kwargs
)
4284 elif _op
== pkgdefs
.API_OP_REHYDRATE
:
4285 ip
.plan_rehydrate(**kwargs
)
4286 elif _op
== pkgdefs
.API_OP_REVERT
:
4287 ip
.plan_revert(**kwargs
)
4288 elif _op
== pkgdefs
.API_OP_SET_MEDIATOR
:
4289 ip
.plan_set_mediators(**kwargs
)
4290 elif _op
== pkgdefs
.API_OP_UNINSTALL
:
4291 ip
.plan_uninstall(**kwargs
)
4292 elif _op
== pkgdefs
.API_OP_UPDATE
:
4293 ip
.plan_update(**kwargs
)
4296 "Unknown api op: {0}".format(_op
))
4298 except apx
.ActionExecutionError
as e
:
4300 except pkg
.actions
.ActionError
as e
:
4301 raise apx
.InvalidPackageErrors([e
])
4302 except apx
.ApiException
:
4305 self
.__call
_imageplan
_evaluate
(ip
)
4306 except apx
.ActionExecutionError
as e
:
4308 except pkg
.actions
.ActionError
as e
:
4309 raise apx
.InvalidPackageErrors([e
])
4311 self
.__cleanup
_alt
_pkg
_certs
()
4313 def make_install_plan(self
, op
, progtrack
, check_cancel
,
4314 noexecute
, pkgs_inst
=None, reject_list
=misc
.EmptyI
):
4315 """Take a list of packages, specified in pkgs_inst, and attempt
4316 to assemble an appropriate image plan. This is a helper
4317 routine for some common operations in the client.
4320 progtrack
.plan_all_start()
4322 self
.__make
_plan
_common
(op
, progtrack
, check_cancel
,
4323 noexecute
, pkgs_inst
=pkgs_inst
,
4324 reject_list
=reject_list
)
4326 progtrack
.plan_all_done()
def make_change_varcets_plan(self, op, progtrack, check_cancel,
    noexecute, facets=None, reject_list=misc.EmptyI,
    variants=None):
        """Assemble an image plan that changes the image's variants
        and/or facets.  This is a helper routine for some common
        operations in the client."""

        progtrack.plan_all_start()

        # Compute the dict of changing variants: only entries that
        # differ from the image's current variant settings are kept.
        if variants:
                requested = set(variants.iteritems())
                current = set(self.cfg.variants.iteritems())
                variants = dict(requested - current)
        elif facets:
                # Apply the requested facet changes on top of the
                # current facet set; a value of None removes the facet
                # entirely.
                new_facets = self.get_facets()
                for f in facets:
                        if facets[f] is None:
                                new_facets.pop(f, None)
                        else:
                                new_facets[f] = facets[f]
                facets = new_facets

        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            new_variants=variants, new_facets=facets,
            reject_list=reject_list)

        progtrack.plan_all_done()
def make_set_mediators_plan(self, op, progtrack, check_cancel,
    noexecute, mediators):
        """Take a dictionary of mediators and attempt to assemble an
        appropriate image plan to set or revert them based on the
        provided version and implementation values.  This is a helper
        routine for some common operations in the client.

        'mediators' maps mediator names to dicts that may contain
        "version" and/or "implementation" entries; an empty dict
        requests a revert of that mediator to its defaults.

        Raises PlanCreationException if any mediator name, version, or
        implementation fails validation."""

        progtrack.plan_all_start()

        # Compute dict of changing mediators.
        new_mediators = copy.deepcopy(mediators)
        old_mediators = self.cfg.mediators
        invalid_mediations = collections.defaultdict(dict)
        # Iterate over a snapshot of the keys since entries may be
        # deleted during the walk; deleting while iterating the live
        # dict is only safe on Python 2.
        for m in list(new_mediators.keys()):
                new_values = new_mediators[m]
                if not new_values:
                        if m not in old_mediators:
                                # Nothing to revert.
                                del new_mediators[m]
                                continue

                        # Revert mediator to defaults.
                        new_mediators[m] = {}
                        continue

                # Validate mediator, provided version, and
                # implementation.
                valid, error = med.valid_mediator(m)
                if not valid:
                        invalid_mediations[m]["mediator"] = (m, error)

                med_version = new_values.get("version")
                if med_version:
                        valid, error = med.valid_mediator_version(
                            med_version)
                        if valid:
                                new_mediators[m]["version"] = \
                                    pkg.version.Version(med_version)
                        else:
                                invalid_mediations[m]["version"] = \
                                    (med_version, error)

                med_impl = new_values.get("implementation")
                if med_impl:
                        valid, error = \
                            med.valid_mediator_implementation(
                            med_impl, allow_empty_version=True)
                        if not valid:
                                # Record under the "implementation"
                                # key; previously this used "version",
                                # which both mislabeled the failure and
                                # could clobber a version validation
                                # error for the same mediator.
                                invalid_mediations[m]\
                                    ["implementation"] = \
                                    (med_impl, error)

        if invalid_mediations:
                raise apx.PlanCreationException(
                    invalid_mediations=invalid_mediations)

        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            new_mediators=new_mediators)

        progtrack.plan_all_done()
def make_sync_plan(self, op, progtrack, check_cancel,
    noexecute, li_pkg_updates=True, reject_list=misc.EmptyI):
        """Attempt to create an appropriate image plan to bring an
        image in sync with its linked image constraints.  This is a
        helper routine for some common operations in the client."""

        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            reject_list=reject_list, li_pkg_updates=li_pkg_updates)
        progtrack.plan_all_done()
def make_uninstall_plan(self, op, progtrack, check_cancel,
    ignore_missing, noexecute, pkgs_to_uninstall):
        """Assemble an image plan that removes the packages named in
        'pkgs_to_uninstall'.  Missing packages are tolerated when
        'ignore_missing' is true."""

        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            ignore_missing=ignore_missing,
            pkgs_to_uninstall=pkgs_to_uninstall)
        progtrack.plan_all_done()
def make_update_plan(self, op, progtrack, check_cancel,
    noexecute, ignore_missing=False, pkgs_update=None,
    reject_list=misc.EmptyI):
        """Create a plan to update all packages, or only the ones named
        in 'pkgs_update', as far as possible.  This is a helper routine
        for some common operations in the client."""

        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            ignore_missing=ignore_missing, pkgs_update=pkgs_update,
            reject_list=reject_list)
        progtrack.plan_all_done()
def make_revert_plan(self, op, progtrack, check_cancel,
    noexecute, args, tagged):
        """Plan the revert of the files named in 'args' -- or, when
        'tagged' is true, of all files carrying the given tags -- back
        to their manifest definitions."""

        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            args=args, tagged=tagged)
        progtrack.plan_all_done()
def make_dehydrate_plan(self, op, progtrack, check_cancel, noexecute,
    publishers):
        """Plan the removal of non-editable files and hardlinks from
        an image for the given publishers."""

        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            publishers=publishers)
        progtrack.plan_all_done()
def make_rehydrate_plan(self, op, progtrack, check_cancel, noexecute,
    publishers):
        """Plan the reinstallation of non-editable files and hardlinks
        into a dehydrated image for the given publishers."""

        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            publishers=publishers)
        progtrack.plan_all_done()
def make_fix_plan(self, op, progtrack, check_cancel, noexecute, args):
        """Assemble an image plan that fixes the image."""

        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            args=args)
        progtrack.plan_all_done()
def make_noop_plan(self, op, progtrack, check_cancel,
    noexecute):
        """Create an image plan that doesn't update the image in any
        way."""

        progtrack.plan_all_start()
        self.__make_plan_common(op, progtrack, check_cancel, noexecute,
            _ip_noop=True)
        progtrack.plan_all_done()
def ipkg_is_up_to_date(self, check_cancel, noexecute,
    refresh_allowed=True, progtrack=None):
        """Test whether the packaging system is updated to the latest
        version known to be available for this image."""

        #
        # This routine makes the distinction between the "target
        # image", which will be altered, and the "running image",
        # which is to say whatever image appears to contain the
        # version of the pkg command we're running.
        #
        # There are two relevant cases here:
        #     1) Packaging code and image we're updating are the same
        #        image.  (i.e. 'pkg update')
        #
        #     2) Packaging code's image and the image we're updating
        #        are different (i.e. 'pkg update -R')
        #
        # In general, we care about getting the user to run the most
        # recent packaging code available for their build.  So, if
        # we're not in the liveroot case, we create a new image which
        # represents "/" on the system.
        #

        if not progtrack:
                progtrack = progress.NullProgressTracker()

        img = self
        if self.__cmddir and not img.is_liveroot():
                #
                # Find the path to ourselves, and use that as a way to
                # locate the image we're in.  It's not perfect -- we
                # could be in a developer's workspace, for example.
                #
                newimg = Image(self.__cmddir,
                    allow_ondisk_upgrade=False, progtrack=progtrack,
                    cmdpath=self.cmdpath)
                useimg = True
                if refresh_allowed:
                        # If refreshing publisher metadata is allowed,
                        # then perform a refresh so that a new
                        # packaging system package can be discovered.
                        newimg.lock(allow_unprivileged=True)
                        try:
                                newimg.refresh_publishers(
                                    progtrack=progtrack)
                        except (apx.ImageFormatUpdateNeeded,
                            apx.PermissionsException):
                                # Can't use the image to perform an
                                # update check and it would be wrong
                                # to prevent the operation from
                                # continuing in these cases.
                                useimg = False
                        except apx.CatalogRefreshException as cre:
                                cre.errmessage = \
                                    _("pkg(5) update check failed.")
                                raise
                        finally:
                                newimg.unlock()

                if useimg:
                        img = newimg

        pfmri = img.get_version_installed(img.strtofmri("package/pkg"))
        if not pfmri or \
            not pkgdefs.PKG_STATE_UPGRADABLE in img.get_pkg_state(
            pfmri):
                # If no version of the package system is installed or a
                # newer version isn't available, then the client is
                # up to date.
                return True

        inc_fmri = img.get_version_installed(img.strtofmri(
            "consolidation/ips/ips-incorporation"))
        if inc_fmri:
                # If the ips-incorporation is installed (it should be
                # since package/pkg depends on it), then we can bypass
                # the solver and plan evaluation if none of the newer
                # versions are allowed by the incorporation.

                # Find the version at which package/pkg is
                # incorporated.
                cat = img.get_catalog(img.IMG_CATALOG_KNOWN)
                inc_ver = None
                for act in cat.get_entry_actions(inc_fmri,
                    [cat.DEPENDENCY], excludes=img.list_excludes()):
                        if act.name == "depend" and \
                            act.attrs["type"] == "incorporate" and \
                            act.attrs["fmri"].startswith("package/pkg"):
                                inc_ver = img.strtofmri(
                                    act.attrs["fmri"]).version
                                break

                if inc_ver:
                        for ver, fmris in cat.fmris_by_version(
                            "package/pkg"):
                                if ver != pfmri.version and \
                                    ver.is_successor(inc_ver,
                                    pkg.version.CONSTRAINT_AUTO):
                                        break
                        else:
                                # No version is newer than installed
                                # and satisfies the incorporation
                                # constraint.
                                return True

        # XXX call to progress tracker that the package is being
        # refreshed.
        img.make_install_plan(pkgdefs.API_OP_INSTALL, progtrack,
            check_cancel, noexecute, pkgs_inst=["pkg:/package/pkg"])

        return img.imageplan.nothingtodo()
# The avoid set implementation uses simplejson to store a set of
# pkg_stems being avoided, and a set of tracked stems that are
# obsolete.
#
# Format is (version, dict((pkg stem, "avoid" or "obsolete"))).

__AVOID_SET_VERSION = 1
def avoid_set_get(self):
        """Return a copy of the image's avoid set."""
        return set(self.__avoid_set)
def obsolete_set_get(self):
        """Return a copy of the set of tracked obsolete packages."""
        return set(self.__group_obsolete)
def __avoid_set_load(self):
        """Load the avoid set from the image state directory.

        Populates self.__avoid_set and self.__group_obsolete from the
        "avoid_set" state file, if present.  A corrupted entry resets
        both sets and marks the state altered so that a clean copy is
        written out by the next save."""

        state_file = os.path.join(self._statedir, "avoid_set")
        self.__avoid_set = set()
        self.__group_obsolete = set()
        if os.path.isfile(state_file):
                # Use a context manager so the state file is always
                # closed; the previous code leaked the file object
                # passed to json.load().
                with open(state_file) as sf:
                        version, d = json.load(sf)
                assert version == self.__AVOID_SET_VERSION
                for stem in d:
                        if d[stem] == "avoid":
                                self.__avoid_set.add(stem)
                        elif d[stem] == "obsolete":
                                self.__group_obsolete.add(stem)
                        else:
                                logger.warn(
                                    "Corrupted avoid list - ignoring")
                                self.__avoid_set = set()
                                self.__group_obsolete = set()
                                self.__avoid_set_altered = True
                                # Stop so later entries cannot
                                # repopulate the sets just reset.
                                break
        else:
                self.__avoid_set_altered = True
def _avoid_set_save(self, new_set=None, obsolete=None):
        """Store the avoid set to the image state directory.

        'new_set', if not None, replaces the current avoid set;
        'obsolete', if not None, replaces the tracked obsolete set.
        The state file is only rewritten when something has changed."""

        if new_set is not None:
                self.__avoid_set_altered = True
                self.__avoid_set = new_set

        if obsolete is not None:
                self.__group_obsolete = obsolete
                self.__avoid_set_altered = True

        if not self.__avoid_set_altered:
                return

        state_file = os.path.join(self._statedir, "avoid_set")
        tmp_file = os.path.join(self._statedir, "avoid_set.new")

        d = dict((a, "avoid") for a in self.__avoid_set)
        d.update((a, "obsolete") for a in self.__group_obsolete)

        try:
                # Write to a temporary file and rename into place so a
                # failed write cannot corrupt the existing state file.
                # The context manager guarantees the temporary file is
                # closed even if json.dump raises; the previous code
                # leaked it in that case.
                with open(tmp_file, "w") as tf:
                        json.dump((self.__AVOID_SET_VERSION, d), tf)
                portable.rename(tmp_file, state_file)
        except Exception as e:
                # Best effort: failure to persist the avoid list is
                # logged but deliberately not fatal.
                logger.warn("Cannot save avoid list: {0}".format(
                    str(e)))
                return

        self.__avoid_set_altered = False
# The frozen dict implementation uses simplejson to store a dictionary
# of pkg_stems that are frozen, the versions at which they're frozen,
# and the reason, if given, why the package was frozen.
#
# Format is (version, dict((pkg stem, (fmri, comment, timestamp)))).

__FROZEN_DICT_VERSION = 1
def get_frozen_list(self):
        """Return a list of (fmri, reason, timestamp) tuples, one for
        each package currently frozen in the image."""

        return [
            (pkg.fmri.MatchingPkgFmri(entry[0]), entry[1], entry[2])
            for entry in self.__freeze_dict_load().values()
        ]
def __freeze_dict_load(self):
        """Load and return the dictionary describing the current state
        of frozen packages; returns an empty dict when no freeze state
        file exists.

        Raises InvalidFreezeFile if the state file cannot be parsed
        and UnknownFreezeFileVersion if it has an unsupported
        version."""

        state_file = os.path.join(self._statedir, "frozen_dict")
        if os.path.isfile(state_file):
                try:
                        # Use a context manager so the state file is
                        # always closed; the previous code leaked the
                        # file object passed to json.load().
                        with open(state_file) as sf:
                                version, d = json.load(sf)
                except EnvironmentError as e:
                        raise apx._convert_error(e)
                except ValueError:
                        # Unparseable content; the parse error itself
                        # is not useful to callers.
                        raise apx.InvalidFreezeFile(state_file)
                if version != self.__FROZEN_DICT_VERSION:
                        raise apx.UnknownFreezeFileVersion(
                            version, self.__FROZEN_DICT_VERSION,
                            state_file)
                return d
        return {}
def _freeze_dict_save(self, new_dict):
        """Persist 'new_dict', the dictionary of frozen packages, to
        the image state directory and rebuild the image catalogs."""

        # Write to a temporary file, then rename into place so a
        # failed write cannot corrupt the existing state file.
        frozen_path = os.path.join(self._statedir, "frozen_dict")
        tmp_path = os.path.join(self._statedir, "frozen_dict.new")

        try:
                with open(tmp_path, "w") as tf:
                        json.dump(
                            (self.__FROZEN_DICT_VERSION, new_dict), tf)
                portable.rename(tmp_path, frozen_path)
        except EnvironmentError as e:
                raise apx._convert_error(e)
        self.__rebuild_image_catalogs()
def get_dehydrated_exclude_func(dehydrated_pubs):
        """Return a boolean function for the pkg(5) exclude mechanism
        that decides whether an action is allowed to be installed,
        based on whether its publisher is going to be dehydrated or
        has been currently dehydrated."""

        # A closure is used so the returned callable can consult the
        # list of dehydrated publishers.
        def __allow_action_dehydrate(act, publisher):
                if publisher not in dehydrated_pubs:
                        # Actions from publishers that are not
                        # dehydrated are always allowed.
                        return True

                kind = act.name
                if kind == "hardlink":
                        # Hardlinks are dropped from dehydrated images.
                        return False
                if kind == "file":
                        attrs = act.attrs
                        # Files explicitly marked dehydrate=false, and
                        # editable (preserve) or overlay files, are
                        # kept; every other file is dropped.
                        return (attrs.get("dehydrate") == "false" or
                            "preserve" in attrs or "overlay" in attrs)

                # All other action types are unaffected by dehydration.
                return True

        return __allow_action_dehydrate