KCC: Copy KCC core out of samba_kcc into samba.kcc module
[Samba.git] / python / samba / kcc / __init__.py
blob 1c1e6d94ebcaa38fbc9ea0c9235864699d5ed918
1 #!/usr/bin/env python
3 # Compute our KCC topology
5 # Copyright (C) Dave Craft 2011
6 # Copyright (C) Andrew Bartlett 2015
8 # Andrew Bartlett's alleged work performed by his underlings Douglas
9 # Bagnall and Garming Sam.
11 # This program is free software; you can redistribute it and/or modify
12 # it under the terms of the GNU General Public License as published by
13 # the Free Software Foundation; either version 3 of the License, or
14 # (at your option) any later version.
16 # This program is distributed in the hope that it will be useful,
17 # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 # GNU General Public License for more details.
21 # You should have received a copy of the GNU General Public License
22 # along with this program. If not, see <http://www.gnu.org/licenses/>.
24 import os
25 import sys
26 import random
27 import uuid
29 # ensure we get messages out immediately, so they get in the samba logs,
30 # and don't get swallowed by a timeout
31 os.environ['PYTHONUNBUFFERED'] = '1'
33 # forcing GMT avoids a problem in some timezones with kerberos. Both MIT
34 # and Heimdal can get mutual authentication errors due to the 24 second difference
35 # between UTC and GMT when using some zone files (eg. the PDT zone from
36 # the US)
37 os.environ["TZ"] = "GMT"
39 # Find right directory when running from source tree
40 sys.path.insert(0, "bin/python")
42 import optparse
43 import logging
44 import itertools
45 import heapq
46 import time
47 from functools import partial
49 from samba import (
50 getopt as options,
51 ldb,
52 dsdb,
53 drs_utils,
54 nttime2unix)
55 from samba.auth import system_session
56 from samba.samdb import SamDB
57 from samba.dcerpc import drsuapi
58 from samba.kcc_utils import *
59 from samba.graph_utils import *
60 from samba import ldif_utils
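# Note (added for clarity): several names used below -- opts, logger, lp,
# creds, unix_now, nt_now and the DEBUG*/DEBUG_FN colour helpers -- are not
# defined or imported in this file.  At this stage of the refactor they
# appear to come in via the wildcard imports above (kcc_utils/graph_utils)
# or to be module-level globals supplied by the samba_kcc script that
# drives this code.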
63 class KCC(object):
64 """The Knowledge Consistency Checker class.
66 A container for objects and methods allowing a run of the KCC. Produces a
67 set of connections in the samdb which the Distributed Replication
68 Service can then use to replicate naming contexts.
70 :param unix_now: The putative current time in seconds since 1970.
71 :param read_only: Don't write to the database.
72 :param verify: Check topological invariants for the generated graphs
73 :param debug: Write verbosely to stderr.
74 "param dot_files: write Graphviz files in /tmp showing topology
75 """
76 def __init__(self):
77 """Initializes the partitions class which can hold
78 our local DC's partitions or all the partitions in
79 the forest
80 """
81 self.part_table = {} # partition objects
82 self.site_table = {}
83 self.transport_table = {}
84 self.ip_transport = None
85 self.sitelink_table = {}
86 self.dsa_by_dnstr = {}
87 self.dsa_by_guid = {}
89 self.get_dsa_by_guidstr = self.dsa_by_guid.get
90 self.get_dsa = self.dsa_by_dnstr.get
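# (Clarifying note) the same DSA objects are indexed twice: by DN string
# (dsa_by_dnstr) and by GUID string (dsa_by_guid); the two get_* aliases
# above are just bound-method shorthands for dictionary lookups that may
# return None.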
92 # TODO: These should be backed by a 'permanent' store so that when
93 # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
94 # the failure information can be returned
95 self.kcc_failed_links = {}
96 self.kcc_failed_connections = set()
98 # Used in inter-site topology computation. A set
99 # of connections (by NTDSConnection object) that are
100 # to be kept when pruning un-needed NTDS Connections
101 self.kept_connections = set()
103 self.my_dsa_dnstr = None # My dsa DN
104 self.my_dsa = None # My dsa object
106 self.my_site_dnstr = None
107 self.my_site = None
109 self.samdb = None
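# A minimal usage sketch (hypothetical -- the run() driver lives in the
# samba_kcc script, not in this excerpt).  A caller is expected to attach
# a SamDB connection and invoke the loaders before any topology
# calculation, roughly:
#
#     kcc = KCC()
#     kcc.samdb = SamDB(url, session_info=system_session(),
#                       credentials=creds, lp=lp)
#     kcc.load_all_transports()
#     kcc.load_all_sitelinks()
#     kcc.load_my_site()
#     kcc.load_all_sites()
#     kcc.load_my_dsa()
#     kcc.load_all_partitions()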
111 def load_all_transports(self):
112 """Loads the inter-site transport objects for Sites
114 :return: None
115 :raise KCCError: if no IP transport is found
116 """
117 try:
118 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
119 self.samdb.get_config_basedn(),
120 scope=ldb.SCOPE_SUBTREE,
121 expression="(objectClass=interSiteTransport)")
122 except ldb.LdbError, (enum, estr):
123 raise KCCError("Unable to find inter-site transports - (%s)" %
124 estr)
126 for msg in res:
127 dnstr = str(msg.dn)
129 transport = Transport(dnstr)
131 transport.load_transport(self.samdb)
132 self.transport_table.setdefault(str(transport.guid),
133 transport)
134 if transport.name == 'IP':
135 self.ip_transport = transport
137 if self.ip_transport is None:
138 raise KCCError("there doesn't seem to be an IP transport")
140 def load_all_sitelinks(self):
141 """Loads the inter-site siteLink objects
143 :return: None
144 :raise KCCError: if site-links aren't found
145 """
146 try:
147 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
148 self.samdb.get_config_basedn(),
149 scope=ldb.SCOPE_SUBTREE,
150 expression="(objectClass=siteLink)")
151 except ldb.LdbError, (enum, estr):
152 raise KCCError("Unable to find inter-site siteLinks - (%s)" % estr)
154 for msg in res:
155 dnstr = str(msg.dn)
157 # already loaded
158 if dnstr in self.sitelink_table:
159 continue
161 sitelink = SiteLink(dnstr)
163 sitelink.load_sitelink(self.samdb)
165 # Assign this siteLink to table
166 # and index by dn
167 self.sitelink_table[dnstr] = sitelink
169 def load_site(self, dn_str):
170 """Helper for load_my_site and load_all_sites.
172 Put all the site's DSAs into the KCC indices.
174 :param dn_str: a site dn_str
175 :return: the Site object pertaining to the dn_str
176 """
177 site = Site(dn_str, unix_now)
178 site.load_site(self.samdb)
180 # We avoid replacing the site with an identical copy in case
181 # somewhere else has a reference to the old one, which would
182 # lead to all manner of confusion and chaos.
183 guid = str(site.site_guid)
184 if guid not in self.site_table:
185 self.site_table[guid] = site
186 self.dsa_by_dnstr.update(site.dsa_table)
187 self.dsa_by_guid.update((str(x.dsa_guid), x)
188 for x in site.dsa_table.values())
190 return self.site_table[guid]
192 def load_my_site(self):
193 """Load the Site object for the local DSA.
195 :return: None
196 """
197 self.my_site_dnstr = ("CN=%s,CN=Sites,%s" % (
198 self.samdb.server_site_name(),
199 self.samdb.get_config_basedn()))
201 self.my_site = self.load_site(self.my_site_dnstr)
203 def load_all_sites(self):
204 """Discover all sites and create Site objects.
206 :return: None
207 :raise: KCCError if sites can't be found
208 """
209 try:
210 res = self.samdb.search("CN=Sites,%s" %
211 self.samdb.get_config_basedn(),
212 scope=ldb.SCOPE_SUBTREE,
213 expression="(objectClass=site)")
214 except ldb.LdbError, (enum, estr):
215 raise KCCError("Unable to find sites - (%s)" % estr)
217 for msg in res:
218 sitestr = str(msg.dn)
219 self.load_site(sitestr)
221 def load_my_dsa(self):
222 """Discover my nTDSDSA dn thru the rootDSE entry
224 :return: None
225 :raise: KCCError if DSA can't be found
226 """
227 dn = ldb.Dn(self.samdb, "<GUID=%s>" % self.samdb.get_ntds_GUID())
228 try:
229 res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
230 attrs=["objectGUID"])
231 except ldb.LdbError, (enum, estr):
232 logger.warning("Search for %s failed: %s. This typically happens"
233 " in --importldif mode due to lack of module"
234 " support.", dn, estr)
235 try:
236 # We work around the failure above by looking at the
237 # dsServiceName that was put in the fake rootdse by
238 # the --exportldif, rather than the
239 # samdb.get_ntds_GUID(). The disadvantage is that this
240 # mode requires we modify the @ROOTDSE dnq to support
241 # --forced-local-dsa
242 service_name_res = self.samdb.search(base="",
243 scope=ldb.SCOPE_BASE,
244 attrs=["dsServiceName"])
245 dn = ldb.Dn(self.samdb,
246 service_name_res[0]["dsServiceName"][0])
248 res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
249 attrs=["objectGUID"])
250 except ldb.LdbError, (enum, estr):
251 raise KCCError("Unable to find my nTDSDSA - (%s)" % estr)
253 if len(res) != 1:
254 raise KCCError("Unable to find my nTDSDSA at %s" %
255 dn.extended_str())
257 ntds_guid = misc.GUID(self.samdb.get_ntds_GUID())
258 if misc.GUID(res[0]["objectGUID"][0]) != ntds_guid:
259 raise KCCError("Did not find the GUID we expected,"
260 " perhaps due to --importldif")
262 self.my_dsa_dnstr = str(res[0].dn)
264 self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)
266 if self.my_dsa_dnstr not in self.dsa_by_dnstr:
267 DEBUG_DARK_YELLOW("my_dsa %s isn't in self.dsas_by_dnstr:"
268 " it must be RODC.\n"
269 "Let's add it, because my_dsa is special!\n"
270 "(likewise for self.dsa_by_guid of course)" %
271 self.my_dsa_dnstr)
273 self.dsa_by_dnstr[self.my_dsa_dnstr] = self.my_dsa
274 self.dsa_by_guid[str(self.my_dsa.dsa_guid)] = self.my_dsa
276 def load_all_partitions(self):
277 """Discover and load all partitions.
279 Each NC is inserted into the part_table by partition
280 dn string (not the nCName dn string)
282 :return: None
283 :raise: KCCError if partitions can't be found
284 """
285 try:
286 res = self.samdb.search("CN=Partitions,%s" %
287 self.samdb.get_config_basedn(),
288 scope=ldb.SCOPE_SUBTREE,
289 expression="(objectClass=crossRef)")
290 except ldb.LdbError, (enum, estr):
291 raise KCCError("Unable to find partitions - (%s)" % estr)
293 for msg in res:
294 partstr = str(msg.dn)
296 # already loaded
297 if partstr in self.part_table:
298 continue
300 part = Partition(partstr)
302 part.load_partition(self.samdb)
303 self.part_table[partstr] = part
305 def should_be_present_test(self):
306 """Enumerate all loaded partitions and DSAs in local
307 site and test if NC should be present as replica
308 """
309 for partdn, part in self.part_table.items():
310 for dsadn, dsa in self.my_site.dsa_table.items():
311 needed, ro, partial = part.should_be_present(dsa)
312 logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
313 (dsadn, part.nc_dnstr, needed, ro, partial))
315 def refresh_failed_links_connections(self):
316 """Based on MS-ADTS 6.2.2.1"""
318 # Instead of NULL link with failure_count = 0, the tuple is
319 # simply removed
321 # LINKS: Refresh failed links
322 self.kcc_failed_links = {}
323 current, needed = self.my_dsa.get_rep_tables()
324 for replica in current.values():
325 # For every possible connection to replicate
326 for reps_from in replica.rep_repsFrom:
327 failure_count = reps_from.consecutive_sync_failures
328 if failure_count <= 0:
329 continue
331 dsa_guid = str(reps_from.source_dsa_obj_guid)
332 time_first_failure = reps_from.last_success
333 last_result = reps_from.last_attempt
334 dns_name = reps_from.dns_name1
336 f = self.kcc_failed_links.get(dsa_guid)
337 if not f:
338 f = KCCFailedObject(dsa_guid, failure_count,
339 time_first_failure, last_result,
340 dns_name)
341 self.kcc_failed_links[dsa_guid] = f
342 #elif f.failure_count == 0:
343 # f.failure_count = failure_count
344 # f.time_first_failure = time_first_failure
345 # f.last_result = last_result
346 else:
347 f.failure_count = max(f.failure_count, failure_count)
348 f.time_first_failure = min(f.time_first_failure,
349 time_first_failure)
350 f.last_result = last_result
352 # CONNECTIONS: Refresh failed connections
353 restore_connections = set()
354 if opts.attempt_live_connections:
355 DEBUG("refresh_failed_links: checking if links are still down")
356 for connection in self.kcc_failed_connections:
357 try:
358 drs_utils.drsuapi_connect(connection.dns_name, lp, creds)
359 # Failed connection is no longer failing
360 restore_connections.add(connection)
361 except drs_utils.drsException:
362 # Failed connection still failing
363 connection.failure_count += 1
364 else:
365 DEBUG("refresh_failed_links: not checking live links because we\n"
366 "weren't asked to --attempt-live-connections")
368 # Remove the restored connections from the failed connections
369 self.kcc_failed_connections.difference_update(restore_connections)
371 def is_stale_link_connection(self, target_dsa):
372 """Check whether a link to a remote DSA is stale
374 Used in MS-ADTS 6.2.2.2 Intrasite Connection Creation
376 Returns True if the remote seems to have been down for at
377 least two hours, otherwise False.
379 :param target_dsa: the remote DSA object
380 :return: True if link is stale, otherwise False
381 """
382 failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
383 if failed_link:
384 # failure_count should be > 0, but check anyways
385 if failed_link.failure_count > 0:
386 unix_first_failure = \
387 nttime2unix(failed_link.time_first_failure)
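# (Clarifying note) time_first_failure (taken from the repsFrom
# last_success field above) is an NT timestamp in 100-nanosecond
# intervals since 1601; nttime2unix() converts it to seconds since the
# Unix epoch so it can be compared with unix_now below.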
388 # TODO guard against future
389 if unix_first_failure > unix_now:
390 logger.error("The last success time attribute for \
391 repsFrom is in the future!")
393 # Perform calculation in seconds
394 if (unix_now - unix_first_failure) > 60 * 60 * 2:
395 return True
397 # TODO connections
399 return False
401 # TODO: This should be backed by some form of local database
402 def remove_unneeded_failed_links_connections(self):
403 # Remove all tuples in kcc_failed_links where failure count = 0
404 # In this implementation, this should never happen.
406 # Remove all connections which were not used this run or connections
407 # that became active during this run.
408 pass
410 def remove_unneeded_ntdsconn(self, all_connected):
411 """Remove unneeded NTDS Connections once topology is calculated
413 Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections
415 :param all_connected: indicates whether all sites are connected
416 :return: None
417 """
418 mydsa = self.my_dsa
420 # New connections won't have GUIDs which are needed for
421 # sorting. Add them.
422 for cn_conn in mydsa.connect_table.values():
423 if cn_conn.guid is None:
424 if opts.readonly:
425 cn_conn.guid = misc.GUID(str(uuid.uuid4()))
426 cn_conn.whenCreated = nt_now
427 else:
428 cn_conn.load_connection(self.samdb)
430 for cn_conn in mydsa.connect_table.values():
432 s_dnstr = cn_conn.get_from_dnstr()
433 if s_dnstr is None:
434 cn_conn.to_be_deleted = True
435 continue
437 # Get the source DSA no matter what site
438 # XXX s_dsa is NEVER USED. It will be removed.
439 s_dsa = self.get_dsa(s_dnstr)
441 #XXX should an RODC be regarded as same site
442 same_site = s_dnstr in self.my_site.dsa_table
444 # Given an nTDSConnection object cn, if the DC with the
445 # nTDSDSA object dc that is the parent object of cn and
446 # the DC with the nTDSDSA object referenced by cn!fromServer
447 # are in the same site, the KCC on dc deletes cn if all of
448 # the following are true:
450 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
452 # No site settings object s exists for the local DC's site, or
453 # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
454 # s!options.
456 # Another nTDSConnection object cn2 exists such that cn and
457 # cn2 have the same parent object, cn!fromServer = cn2!fromServer,
458 # and either
460 # cn!whenCreated < cn2!whenCreated
462 # cn!whenCreated = cn2!whenCreated and
463 # cn!objectGUID < cn2!objectGUID
465 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
466 if same_site:
467 if not cn_conn.is_generated():
468 continue
470 if self.my_site.is_cleanup_ntdsconn_disabled():
471 continue
473 # Loop thru connections looking for a duplicate that
474 # fulfills the previous criteria
475 lesser = False
476 packed_guid = ndr_pack(cn_conn.guid)
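# (Clarifying note) ndr_pack() yields the GUID's wire-format bytes;
# comparing the packed forms gives a stable ordering for the objectGUID
# tie-break performed below.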
477 for cn2_conn in mydsa.connect_table.values():
478 if cn2_conn is cn_conn:
479 continue
481 s2_dnstr = cn2_conn.get_from_dnstr()
483 # If the NTDS Connection has a different
484 # fromServer field then no match
485 if s2_dnstr != s_dnstr:
486 continue
488 #XXX GUID comparison
489 lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or
490 (cn_conn.whenCreated == cn2_conn.whenCreated and
491 packed_guid < ndr_pack(cn2_conn.guid)))
493 if lesser:
494 break
496 if lesser and not cn_conn.is_rodc_topology():
497 cn_conn.to_be_deleted = True
499 # Given an nTDSConnection object cn, if the DC with the nTDSDSA
500 # object dc that is the parent object of cn and the DC with
501 # the nTDSDSA object referenced by cn!fromServer are in
502 # different sites, a KCC acting as an ISTG in dc's site
503 # deletes cn if all of the following are true:
505 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
507 # cn!fromServer references an nTDSDSA object for a DC
508 # in a site other than the local DC's site.
510 # The keepConnections sequence returned by
511 # CreateIntersiteConnections() does not contain
512 # cn!objectGUID, or cn is "superseded by" (see below)
513 # another nTDSConnection cn2 and keepConnections
514 # contains cn2!objectGUID.
516 # The return value of CreateIntersiteConnections()
517 # was true.
519 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
520 # cn!options
522 else: # different site
524 if not mydsa.is_istg():
525 continue
527 if not cn_conn.is_generated():
528 continue
530 # TODO
531 # We are directly using this connection in intersite or
532 # we are using a connection which can supersede this one.
534 # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
535 # appear to be correct.
537 # 1. cn!fromServer and cn!parent appear inconsistent with
538 # no cn2
539 # 2. The repsFrom do not imply each other
541 if cn_conn in self.kept_connections: # and not_superceded:
542 continue
544 # This is the result of create_intersite_connections
545 if not all_connected:
546 continue
548 if not cn_conn.is_rodc_topology():
549 cn_conn.to_be_deleted = True
551 if mydsa.is_ro() or opts.readonly:
552 for connect in mydsa.connect_table.values():
553 if connect.to_be_deleted:
554 DEBUG_FN("TO BE DELETED:\n%s" % connect)
555 if connect.to_be_added:
556 DEBUG_FN("TO BE ADDED:\n%s" % connect)
558 # Perform deletion from our tables but perform
559 # no database modification
560 mydsa.commit_connections(self.samdb, ro=True)
561 else:
562 # Commit any modified connections
563 mydsa.commit_connections(self.samdb)
565 def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
566 """Update an repsFrom object if required.
568 Part of MS-ADTS 6.2.2.5.
570 Update t_repsFrom if necessary to satisfy requirements. Such
571 updates are typically required when the IDL_DRSGetNCChanges
572 server has moved from one site to another--for example, to
573 enable compression when the server is moved from the
574 client's site to another site.
576 The repsFrom.update_flags bit field may be modified
577 auto-magically if any changes are made here. See
578 kcc_utils.RepsFromTo for gory details.
581 :param n_rep: NC replica we need
582 :param t_repsFrom: repsFrom tuple to modify
583 :param s_rep: NC replica at source DSA
584 :param s_dsa: source DSA
585 :param cn_conn: Local DSA NTDSConnection child
587 :return: None
588 """
589 s_dnstr = s_dsa.dsa_dnstr
590 update = 0x0
592 same_site = s_dnstr in self.my_site.dsa_table
594 # if schedule doesn't match then update and modify
595 times = convert_schedule_to_repltimes(cn_conn.schedule)
596 if times != t_repsFrom.schedule:
597 t_repsFrom.schedule = times
598 update |= drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
600 # Bit DRS_PER_SYNC is set in replicaFlags if and only
601 # if nTDSConnection schedule has a value v that specifies
602 # scheduled replication is to be performed at least once
603 # per week.
604 if cn_conn.is_schedule_minimum_once_per_week():
606 if ((t_repsFrom.replica_flags &
607 drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0):
608 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC
610 # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
611 # if the source DSA and the local DC's nTDSDSA object are
612 # in the same site or source dsa is the FSMO role owner
613 # of one or more FSMO roles in the NC replica.
614 if same_site or n_rep.is_fsmo_role_owner(s_dnstr):
616 if ((t_repsFrom.replica_flags &
617 drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0):
618 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC
620 # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
621 # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
622 # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
623 # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
624 # t.replicaFlags if and only if s and the local DC's
625 # nTDSDSA object are in different sites.
626 if ((cn_conn.options &
627 dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0):
629 if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:
630 # XXX WARNING
632 # it LOOKS as if this next test is a bit silly: it
633 # checks the flag then sets it if it not set; the same
634 # effect could be achieved by unconditionally setting
635 # it. But in fact the repsFrom object has special
636 # magic attached to it, and altering replica_flags has
637 # side-effects. That is bad in my opinion, but there
638 # you go.
639 if ((t_repsFrom.replica_flags &
640 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
641 t_repsFrom.replica_flags |= \
642 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
644 elif not same_site:
646 if ((t_repsFrom.replica_flags &
647 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
648 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
650 # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
651 # and only if s and the local DC's nTDSDSA object are
652 # not in the same site and the
653 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
654 # clear in cn!options
655 if (not same_site and
656 (cn_conn.options &
657 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):
659 if ((t_repsFrom.replica_flags &
660 drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0):
661 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION
663 # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
664 # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
665 if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:
667 if ((t_repsFrom.replica_flags &
668 drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0):
669 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC
671 # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
672 # set in t.replicaFlags if and only if cn!enabledConnection = false.
673 if not cn_conn.is_enabled():
675 if ((t_repsFrom.replica_flags &
676 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0):
677 t_repsFrom.replica_flags |= \
678 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC
680 if ((t_repsFrom.replica_flags &
681 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0):
682 t_repsFrom.replica_flags |= \
683 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC
685 # If s and the local DC's nTDSDSA object are in the same site,
686 # cn!transportType has no value, or the RDN of cn!transportType
687 # is CN=IP:
689 # Bit DRS_MAIL_REP in t.replicaFlags is clear.
691 # t.uuidTransport = NULL GUID.
693 # t.uuidDsa = The GUID-based DNS name of s.
695 # Otherwise:
697 # Bit DRS_MAIL_REP in t.replicaFlags is set.
699 # If x is the object with dsname cn!transportType,
700 # t.uuidTransport = x!objectGUID.
702 # Let a be the attribute identified by
703 # x!transportAddressAttribute. If a is
704 # the dNSHostName attribute, t.uuidDsa = the GUID-based
705 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
707 # It appears that the first statement i.e.
709 # "If s and the local DC's nTDSDSA object are in the same
710 # site, cn!transportType has no value, or the RDN of
711 # cn!transportType is CN=IP:"
713 # could be a slightly tighter statement if it had an "or"
714 # between each condition. I believe this should
715 # be interpreted as:
717 # IF (same-site) OR (no-value) OR (type-ip)
719 # because IP should be the primary transport mechanism
720 # (even in inter-site) and the absence of the transportType
721 # attribute should always imply IP no matter if it's multi-site
723 # NOTE MS-TECH INCORRECT:
725 # All indications point to these statements above being
726 # incorrectly stated:
728 # t.uuidDsa = The GUID-based DNS name of s.
730 # Let a be the attribute identified by
731 # x!transportAddressAttribute. If a is
732 # the dNSHostName attribute, t.uuidDsa = the GUID-based
733 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
735 # because the uuidDSA is a GUID and not a GUID-based DNS
736 # name. Nor can uuidDsa hold (s!parent)!a if not
737 # dNSHostName. What should have been said is:
739 # t.naDsa = The GUID-based DNS name of s
741 # That would also be correct if transportAddressAttribute
742 # were "mailAddress" because (naDsa) can also correctly
743 # hold the SMTP ISM service address.
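# (Clarifying note) nastr below is the GUID-based DNS name of the source
# DSA, i.e. "<ntds-guid>._msdcs.<forest-dns-name>", which is the name a
# replication partner is located by.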
745 nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())
747 # We're not currently supporting SMTP replication
748 # so is_smtp_replication_available() is currently
749 # always returning False
750 if ((same_site or
751 cn_conn.transport_dnstr is None or
752 cn_conn.transport_dnstr.find("CN=IP") == 0 or
753 not is_smtp_replication_available())):
755 if ((t_repsFrom.replica_flags &
756 drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0):
757 t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP
759 t_repsFrom.transport_guid = misc.GUID()
761 # See (NOTE MS-TECH INCORRECT) above
762 if t_repsFrom.version == 0x1:
763 if t_repsFrom.dns_name1 is None or \
764 t_repsFrom.dns_name1 != nastr:
765 t_repsFrom.dns_name1 = nastr
766 else:
767 if t_repsFrom.dns_name1 is None or \
768 t_repsFrom.dns_name2 is None or \
769 t_repsFrom.dns_name1 != nastr or \
770 t_repsFrom.dns_name2 != nastr:
771 t_repsFrom.dns_name1 = nastr
772 t_repsFrom.dns_name2 = nastr
774 else:
775 # XXX This entire branch is NEVER used! Because we don't do SMTP!
776 # (see the if condition above). Just close your eyes here.
777 if ((t_repsFrom.replica_flags &
778 drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0):
779 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP
781 # We have a transport type but it's not an
782 # object in the database
783 if cn_conn.transport_guid not in self.transport_table:
784 raise KCCError("Missing inter-site transport - (%s)" %
785 cn_conn.transport_dnstr)
787 x_transport = self.transport_table[str(cn_conn.transport_guid)]
789 if t_repsFrom.transport_guid != x_transport.guid:
790 t_repsFrom.transport_guid = x_transport.guid
792 # See (NOTE MS-TECH INCORRECT) above
793 if x_transport.address_attr == "dNSHostName":
795 if t_repsFrom.version == 0x1:
796 if t_repsFrom.dns_name1 is None or \
797 t_repsFrom.dns_name1 != nastr:
798 t_repsFrom.dns_name1 = nastr
799 else:
800 if t_repsFrom.dns_name1 is None or \
801 t_repsFrom.dns_name2 is None or \
802 t_repsFrom.dns_name1 != nastr or \
803 t_repsFrom.dns_name2 != nastr:
804 t_repsFrom.dns_name1 = nastr
805 t_repsFrom.dns_name2 = nastr
807 else:
808 # MS tech specification says we retrieve the named
809 # attribute in "transportAddressAttribute" from the parent of
810 # the DSA object
811 try:
812 pdnstr = s_dsa.get_parent_dnstr()
813 attrs = [x_transport.address_attr]
815 res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
816 attrs=attrs)
817 except ldb.LdbError, (enum, estr):
818 raise KCCError(
819 "Unable to find attr (%s) for (%s) - (%s)" %
820 (x_transport.address_attr, pdnstr, estr))
822 msg = res[0]
823 nastr = str(msg[x_transport.address_attr][0])
825 # See (NOTE MS-TECH INCORRECT) above
826 if t_repsFrom.version == 0x1:
827 if t_repsFrom.dns_name1 is None or \
828 t_repsFrom.dns_name1 != nastr:
829 t_repsFrom.dns_name1 = nastr
830 else:
831 if t_repsFrom.dns_name1 is None or \
832 t_repsFrom.dns_name2 is None or \
833 t_repsFrom.dns_name1 != nastr or \
834 t_repsFrom.dns_name2 != nastr:
836 t_repsFrom.dns_name1 = nastr
837 t_repsFrom.dns_name2 = nastr
839 if t_repsFrom.is_modified():
840 logger.debug("modify_repsFrom(): %s" % t_repsFrom)
842 def is_repsFrom_implied(self, n_rep, cn_conn):
843 """Given a NC replica and NTDS Connection, determine if the connection
844 implies a repsFrom tuple should be present from the source DSA listed
845 in the connection to the naming context
847 :param n_rep: NC replica
848 :param cn_conn: NTDS Connection
849 :return: (True || False), source DSA
850 """
851 #XXX different conditions for "implies" than MS-ADTS 6.2.2
853 # NTDS Connection must satisfy all the following criteria
854 # to imply a repsFrom tuple is needed:
856 # cn!enabledConnection = true.
857 # cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
858 # cn!fromServer references an nTDSDSA object.
860 s_dsa = None
862 if cn_conn.is_enabled() and not cn_conn.is_rodc_topology():
863 s_dnstr = cn_conn.get_from_dnstr()
864 if s_dnstr is not None:
865 s_dsa = self.get_dsa(s_dnstr)
867 # No DSA matching this source DN string?
868 if s_dsa is None:
869 return False, None
871 # To imply a repsFrom tuple is needed, each of these
872 # must be True:
874 # An NC replica of the NC "is present" on the DC to
875 # which the nTDSDSA object referenced by cn!fromServer
876 # corresponds.
878 # An NC replica of the NC "should be present" on
879 # the local DC
880 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
882 if s_rep is None or not s_rep.is_present():
883 return False, None
885 # To imply a repsFrom tuple is needed, each of these
886 # must be True:
888 # The NC replica on the DC referenced by cn!fromServer is
889 # a writable replica or the NC replica that "should be
890 # present" on the local DC is a partial replica.
892 # The NC is not a domain NC, the NC replica that
893 # "should be present" on the local DC is a partial
894 # replica, cn!transportType has no value, or
895 # cn!transportType has an RDN of CN=IP.
897 implied = (not s_rep.is_ro() or n_rep.is_partial()) and \
898 (not n_rep.is_domain() or
899 n_rep.is_partial() or
900 cn_conn.transport_dnstr is None or
901 cn_conn.transport_dnstr.find("CN=IP") == 0)
903 if implied:
904 return True, s_dsa
905 else:
906 return False, None
908 def translate_ntdsconn(self, current_dsa=None):
909 """Adjust repsFrom to match NTDSConnections
911 This function adjusts values of repsFrom abstract attributes of NC
912 replicas on the local DC to match those implied by
913 nTDSConnection objects.
915 Based on [MS-ADTS] 6.2.2.5
917 :param current_dsa: optional DSA on whose behalf we are acting.
918 :return: None
919 """
920 count = 0
922 if current_dsa is None:
923 current_dsa = self.my_dsa
925 if current_dsa.is_translate_ntdsconn_disabled():
926 logger.debug("skipping translate_ntdsconn() "
927 "because disabling flag is set")
928 return
930 logger.debug("translate_ntdsconn(): enter")
932 current_rep_table, needed_rep_table = current_dsa.get_rep_tables()
934 # Filled in with replicas we currently have that need deleting
935 delete_reps = set()
937 # We're using the MS notation names here to allow
938 # correlation back to the published algorithm.
940 # n_rep - NC replica (n)
941 # t_repsFrom - tuple (t) in n!repsFrom
942 # s_dsa - Source DSA of the replica. Defined as nTDSDSA
943 # object (s) such that (s!objectGUID = t.uuidDsa)
944 # In our IDL representation of repsFrom the (uuidDsa)
945 # attribute is called (source_dsa_obj_guid)
946 # cn_conn - (cn) is nTDSConnection object and child of the local
947 # DC's nTDSDSA object and (cn!fromServer = s)
948 # s_rep - source DSA replica of n
950 # If we have the replica and it's not needed
951 # then we add it to the "to be deleted" list.
952 for dnstr in current_rep_table:
953 if dnstr not in needed_rep_table:
954 delete_reps.add(dnstr)
956 DEBUG_FN('current %d needed %d delete %d' % (len(current_rep_table),
957 len(needed_rep_table), len(delete_reps)))
959 if delete_reps:
960 DEBUG('deleting these reps: %s' % delete_reps)
961 for dnstr in delete_reps:
962 del current_rep_table[dnstr]
964 # Now perform the scan of replicas we'll need
965 # and compare any current repsFrom against the
966 # connections
967 for n_rep in needed_rep_table.values():
969 # load any repsFrom and fsmo roles as we'll
970 # need them during connection translation
971 n_rep.load_repsFrom(self.samdb)
972 n_rep.load_fsmo_roles(self.samdb)
974 # Loop thru the existing repsFrom tuples (if any)
975 # XXX This is a list and could contain duplicates
976 # (multiple load_repsFrom calls)
977 for t_repsFrom in n_rep.rep_repsFrom:
979 # for each tuple t in n!repsFrom, let s be the nTDSDSA
980 # object such that s!objectGUID = t.uuidDsa
981 guidstr = str(t_repsFrom.source_dsa_obj_guid)
982 s_dsa = self.get_dsa_by_guidstr(guidstr)
984 # Source dsa is gone from config (strange)
985 # so cleanup stale repsFrom for unlisted DSA
986 if s_dsa is None:
987 logger.warning("repsFrom source DSA guid (%s) not found" %
988 guidstr)
989 t_repsFrom.to_be_deleted = True
990 continue
992 s_dnstr = s_dsa.dsa_dnstr
994 # Retrieve my DSAs connection object (if it exists)
995 # that specifies the fromServer equivalent to
996 # the DSA that is specified in the repsFrom source
997 connections = current_dsa.get_connection_by_from_dnstr(s_dnstr)
999 count = 0
1000 cn_conn = None
1002 for con in connections:
1003 if con.is_rodc_topology():
1004 continue
1005 cn_conn = con
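# (Clarifying note) if several qualifying connections reference the same
# fromServer, the last one enumerated here wins; there is no explicit
# tie-break at this point.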
1007 # Let (cn) be the nTDSConnection object such that (cn)
1008 # is a child of the local DC's nTDSDSA object and
1009 # (cn!fromServer = s) and (cn!options) does not contain
1010 # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.
1012 # KCC removes this repsFrom tuple if any of the following
1013 # is true:
1014 # cn = NULL.
1015 # [...]
1017 #XXX varying possible interpretations of rodc_topology
1018 if cn_conn is None:
1019 t_repsFrom.to_be_deleted = True
1020 continue
1022 # [...] KCC removes this repsFrom tuple if:
1024 # No NC replica of the NC "is present" on DSA that
1025 # would be source of replica
1027 # A writable replica of the NC "should be present" on
1028 # the local DC, but a partial replica "is present" on
1029 # the source DSA
1030 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
1032 if s_rep is None or not s_rep.is_present() or \
1033 (not n_rep.is_ro() and s_rep.is_partial()):
1035 t_repsFrom.to_be_deleted = True
1036 continue
1038 # If the KCC did not remove t from n!repsFrom, it updates t
1039 self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
1041 # Loop thru connections and add implied repsFrom tuples
1042 # for each NTDSConnection under our local DSA if the
1043 # repsFrom is not already present
1044 for cn_conn in current_dsa.connect_table.values():
1046 implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn)
1047 if not implied:
1048 continue
1050 # Loop thru the existing repsFrom tuples (if any) and
1051 # if we already have a tuple for this connection then
1052 # no need to proceed to add. It will have been changed
1053 # to have the correct attributes above
1054 for t_repsFrom in n_rep.rep_repsFrom:
1055 guidstr = str(t_repsFrom.source_dsa_obj_guid)
1056 #XXX what?
1057 if s_dsa is self.get_dsa_by_guidstr(guidstr):
1058 s_dsa = None
1059 break
1061 if s_dsa is None:
1062 continue
1064 # Create a new RepsFromTo and proceed to modify
1065 # it according to specification
1066 t_repsFrom = RepsFromTo(n_rep.nc_dnstr)
1068 t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid
1070 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
1072 self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
1074 # Add to our NC repsFrom as this is newly computed
1075 if t_repsFrom.is_modified():
1076 n_rep.rep_repsFrom.append(t_repsFrom)
1078 if opts.readonly:
1079 # Display any to be deleted or modified repsFrom
1080 text = n_rep.dumpstr_to_be_deleted()
1081 if text:
1082 logger.info("TO BE DELETED:\n%s" % text)
1083 text = n_rep.dumpstr_to_be_modified()
1084 if text:
1085 logger.info("TO BE MODIFIED:\n%s" % text)
1087 # Perform deletion from our tables but perform
1088 # no database modification
1089 n_rep.commit_repsFrom(self.samdb, ro=True)
1090 else:
1091 # Commit any modified repsFrom to the NC replica
1092 n_rep.commit_repsFrom(self.samdb)
1094 def merge_failed_links(self):
1095 """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.
1097 The KCC on a writable DC attempts to merge the link and connection
1098 failure information from bridgehead DCs in its own site to help it
1099 identify failed bridgehead DCs.
1101 Based on MS-ADTS 6.2.2.3.2 "Merge of kCCFailedLinks and
1102 kCCFailedConnections from Bridgeheads"
1104 :param ping: An oracle of current bridgehead availability
1105 :return: None
1106 """
1107 # 1. Queries every bridgehead server in your site (other than yourself)
1108 # 2. For every ntDSConnection that references a server in a different
1109 # site merge all the failure info
1111 # XXX - not implemented yet
1112 if opts.attempt_live_connections:
1113 DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")
1114 else:
1115 DEBUG_FN("skipping merge_failed_links() because it requires "
1116 "real network connections\n"
1117 "and we weren't asked to --attempt-live-connections")
1119 def setup_graph(self, part):
1120 """Set up an intersite graph
1122 An intersite graph has a Vertex for each site object, a
1123 MultiEdge for each SiteLink object, and a MultiEdgeSet for
1124 each siteLinkBridge object (or implied siteLinkBridge). It
1125 reflects the intersite topology in a slightly more abstract
1126 graph form.
1128 Roughly corresponds to MS-ADTS 6.2.2.3.4.3
1130 :param part: a Partition object
1131 :returns: an IntersiteGraph object
1132 """
1133 guid_to_vertex = {}
1134 # Create graph
1135 g = IntersiteGraph()
1136 # Add vertices
1137 for site_guid, site in self.site_table.items():
1138 vertex = Vertex(site, part)
1139 vertex.guid = site_guid
1140 vertex.ndrpacked_guid = ndr_pack(site.site_guid)
1141 g.vertices.add(vertex)
1143 if not guid_to_vertex.get(site_guid):
1144 guid_to_vertex[site_guid] = []
1146 guid_to_vertex[site_guid].append(vertex)
1148 connected_vertices = set()
1149 for transport_guid, transport in self.transport_table.items():
1150 # Currently only ever "IP"
1151 if transport.name != 'IP':
1152 DEBUG_FN("setup_graph is ignoring transport %s" %
1153 transport.name)
1154 continue
1155 for site_link_dn, site_link in self.sitelink_table.items():
1156 new_edge = create_edge(transport_guid, site_link,
1157 guid_to_vertex)
1158 connected_vertices.update(new_edge.vertices)
1159 g.edges.add(new_edge)
1161 # If 'Bridge all site links' is enabled and Win2k3 bridges required
1162 # is not set
1163 # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
1164 # No documentation for this however, ntdsapi.h appears to have:
1165 # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
1166 if (((self.my_site.site_options & 0x00000002) == 0
1167 and (self.my_site.site_options & 0x00001000) == 0)):
1168 g.edge_set.add(create_auto_edge_set(g, transport_guid))
1169 else:
1170 # TODO get all site link bridges
1171 for site_link_bridge in []:
1172 g.edge_set.add(create_edge_set(g, transport_guid,
1173 site_link_bridge))
1175 g.connected_vertices = connected_vertices
1177 #be less verbose in dot file output unless --debug
1178 do_dot_files = opts.dot_files and opts.debug
1179 dot_edges = []
1180 for edge in g.edges:
1181 for a, b in itertools.combinations(edge.vertices, 2):
1182 dot_edges.append((a.site.site_dnstr, b.site.site_dnstr))
1183 verify_properties = ()
1184 verify_and_dot('site_edges', dot_edges, directed=False,
1185 label=self.my_dsa_dnstr,
1186 properties=verify_properties, debug=DEBUG,
1187 verify=opts.verify,
1188 dot_files=do_dot_files)
1190 return g
1192 def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
1193 """Get a bridghead DC for a site.
1195 Part of MS-ADTS 6.2.2.3.4.4
1197 :param site: site object representing the site for which a
1198 bridgehead DC is desired.
1199 :param part: crossRef for NC to replicate.
1200 :param transport: interSiteTransport object for replication
1201 traffic.
1202 :param partial_ok: True if a DC containing a partial
1203 replica or a full replica will suffice, False if only
1204 a full replica will suffice.
1205 :param detect_failed: True to detect failed DCs and route
1206 replication traffic around them, False to assume no DC
1207 has failed.
1208 :return: dsa object for the bridgehead DC or None
1209 """
1211 bhs = self.get_all_bridgeheads(site, part, transport,
1212 partial_ok, detect_failed)
1213 if len(bhs) == 0:
1214 DEBUG_MAGENTA("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" %
1215 site.site_dnstr)
1216 return None
1217 else:
1218 DEBUG_GREEN("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" %
1219 (site.site_dnstr, bhs[0].dsa_dnstr))
1220 return bhs[0]
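# (Clarifying note) the "best" bridgehead is simply the first element of
# the list returned by get_all_bridgeheads(), whose ordering is either
# the GC/GUID sort or a random shuffle (see below).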
1222 def get_all_bridgeheads(self, site, part, transport,
1223 partial_ok, detect_failed):
1224 """Get all bridghead DCs on a site satisfying the given criteria
1226 Part of MS-ADTS 6.2.2.3.4.4
1228 :param site: site object representing the site for which
1229 bridgehead DCs are desired.
1230 :param part: partition for NC to replicate.
1231 :param transport: interSiteTransport object for
1232 replication traffic.
1233 :param partial_ok: True if a DC containing a partial
1234 replica or a full replica will suffice, False if
1235 only a full replica will suffice.
1236 :param detect_failed: True to detect failed DCs and route
1237 replication traffic around them, FALSE to assume
1238 no DC has failed.
1239 :return: list of dsa objects for available bridgehead DCs
1240 """
1242 bhs = []
1244 logger.debug("get_all_bridgeheads: %s" % transport.name)
1245 if 'Site-5' in site.site_dnstr:
1246 DEBUG_RED("get_all_bridgeheads with %s, part%s, partial_ok %s"
1247 " detect_failed %s" % (site.site_dnstr, part.partstr,
1248 partial_ok, detect_failed))
1249 logger.debug(site.rw_dsa_table)
1250 for dsa in site.rw_dsa_table.values():
1252 pdnstr = dsa.get_parent_dnstr()
1254 # IF t!bridgeheadServerListBL has one or more values and
1255 # t!bridgeheadServerListBL does not contain a reference
1256 # to the parent object of dc then skip dc
1257 if ((len(transport.bridgehead_list) != 0 and
1258 pdnstr not in transport.bridgehead_list)):
1259 continue
1261 # IF dc is in the same site as the local DC
1262 # IF a replica of cr!nCName is not in the set of NC replicas
1263 # that "should be present" on dc or a partial replica of the
1264 # NC "should be present" but partialReplicasOkay = FALSE
1265 # Skip dc
1266 if self.my_site.same_site(dsa):
1267 needed, ro, partial = part.should_be_present(dsa)
1268 if not needed or (partial and not partial_ok):
1269 continue
1270 rep = dsa.get_current_replica(part.nc_dnstr)
1272 # ELSE
1273 # IF an NC replica of cr!nCName is not in the set of NC
1274 # replicas that "are present" on dc or a partial replica of
1275 # the NC "is present" but partialReplicasOkay = FALSE
1276 # Skip dc
1277 else:
1278 rep = dsa.get_current_replica(part.nc_dnstr)
1279 if rep is None or (rep.is_partial() and not partial_ok):
1280 continue
1282 # IF AmIRODC() and cr!nCName corresponds to default NC then
1283 # Let dsaobj be the nTDSDSA object of the dc
1284 # IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
1285 # Skip dc
1286 if self.my_dsa.is_ro() and rep is not None and rep.is_default():
1287 if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
1288 continue
1290 # IF t!name != "IP" and the parent object of dc has no value for
1291 # the attribute specified by t!transportAddressAttribute
1292 # Skip dc
1293 if transport.name != "IP":
1294 # MS tech specification says we retrieve the named
1295 # attribute in "transportAddressAttribute" from the parent
1296 # of the DSA object
1297 try:
1298 attrs = [transport.address_attr]
1300 res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
1301 attrs=attrs)
1302 except ldb.LdbError, (enum, estr):
1303 continue
1305 msg = res[0]
1306 if transport.address_attr not in msg:
1307 continue
1308 #XXX nastr is NEVER USED. It will be removed.
1309 nastr = str(msg[transport.address_attr][0])
1311 # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
1312 # Skip dc
1313 if self.is_bridgehead_failed(dsa, detect_failed):
1314 DEBUG("bridgehead is failed")
1315 continue
1317 logger.debug("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr)
1318 bhs.append(dsa)
1320 # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
1321 # s!options
1322 # SORT bhs such that all GC servers precede DCs that are not GC
1323 # servers, and otherwise by ascending objectGUID
1324 # ELSE
1325 # SORT bhs in a random order
1326 if site.is_random_bridgehead_disabled():
1327 bhs.sort(sort_dsa_by_gc_and_guid)
1328 else:
1329 random.shuffle(bhs)
1330 DEBUG_YELLOW(bhs)
1331 return bhs
1333 def is_bridgehead_failed(self, dsa, detect_failed):
1334 """Determine whether a given DC is known to be in a failed state
1336 :param dsa: the bridgehead to test
1337 :param detect_failed: True to really check, False to assume no failure
1338 :return: True if and only if the DC should be considered failed
1340 Here we DEPART from the pseudo code spec which appears to be
1341 wrong. It says, in full:
1343 /***** BridgeheadDCFailed *****/
1344 /* Determine whether a given DC is known to be in a failed state.
1345 * IN: objectGUID - objectGUID of the DC's nTDSDSA object.
1346 * IN: detectFailedDCs - TRUE if and only if failed DC detection is
1347 * enabled.
1348 * RETURNS: TRUE if and only if the DC should be considered to be in a
1349 * failed state.
1351 BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool
1353 IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in
1354 the options attribute of the site settings object for the local
1355 DC's site
1356 RETURN FALSE
1357 ELSEIF a tuple z exists in the kCCFailedLinks or
1358 kCCFailedConnections variables such that z.UUIDDsa =
1359 objectGUID, z.FailureCount > 1, and the current time -
1360 z.TimeFirstFailure > 2 hours
1361 RETURN TRUE
1362 ELSE
1363 RETURN detectFailedDCs
1364 ENDIF
1367 where you will see detectFailedDCs is not behaving as
1368 advertised -- it is acting as a default return code in the
1369 event that a failure is not detected, not a switch turning
1370 detection on or off. Elsewhere the documentation seems to
1371 concur with the comment rather than the code.
1372 """
1373 if not detect_failed:
1374 return False
1376 # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
1377 # When DETECT_STALE_DISABLED, we can never know if
1378 # it's in a failed state
1379 if self.my_site.site_options & 0x00000008:
1380 return False
1382 return self.is_stale_link_connection(dsa)
1384 def create_connection(self, part, rbh, rsite, transport,
1385 lbh, lsite, link_opt, link_sched,
1386 partial_ok, detect_failed):
1387 """Create an nTDSConnection object as specified if it doesn't exist.
1389 Part of MS-ADTS 6.2.2.3.4.5
1391 :param part: crossRef object for the NC to replicate.
1392 :param rbh: nTDSDSA object for DC to act as the
1393 IDL_DRSGetNCChanges server (which is in a site other
1394 than the local DC's site).
1395 :param rsite: site of the rbh
1396 :param transport: interSiteTransport object for the transport
1397 to use for replication traffic.
1398 :param lbh: nTDSDSA object for DC to act as the
1399 IDL_DRSGetNCChanges client (which is in the local DC's site).
1400 :param lsite: site of the lbh
1401 :param link_opt: Replication parameters (aggregated siteLink options,
1402 etc.)
1403 :param link_sched: Schedule specifying the times at which
1404 to begin replicating.
1405 :param partial_ok: True if bridgehead DCs containing partial
1406 replicas of the NC are acceptable.
1407 :param detect_failed: True to detect failed DCs and route
1408 replication traffic around them, FALSE to assume no DC
1409 has failed.
1410 """
1411 rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
1412 partial_ok, False)
1413 rbh_table = {x.dsa_dnstr: x for x in rbhs_all}
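# (Clarifying note) rbh_table maps each remote bridgehead's DN string to
# its DSA object so that cn!fromServer values can be resolved with a
# single dict lookup in the loops below.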
1415 DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all),
1416 [x.dsa_dnstr for x in rbhs_all]))
1418 # MS-TECH says to compute rbhs_avail but then doesn't use it
1419 # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
1420 # partial_ok, detect_failed)
1422 lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
1423 partial_ok, False)
1424 if lbh.is_ro():
1425 lbhs_all.append(lbh)
1427 DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all),
1428 [x.dsa_dnstr for x in lbhs_all]))
1430 # MS-TECH says to compute lbhs_avail but then doesn't use it
1431 # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
1432 # partial_ok, detect_failed)
1434 # FOR each nTDSConnection object cn such that the parent of cn is
1435 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1436 for ldsa in lbhs_all:
1437 for cn in ldsa.connect_table.values():
1439 rdsa = rbh_table.get(cn.from_dnstr)
1440 if rdsa is None:
1441 continue
1443 DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr)
1444 # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
1445 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
1446 # cn!transportType references t
1447 if ((cn.is_generated() and
1448 not cn.is_rodc_topology() and
1449 cn.transport_guid == transport.guid)):
1451 # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
1452 # cn!options and cn!schedule != sch
1453 # Perform an originating update to set cn!schedule to
1454 # sched
1455 if ((not cn.is_user_owned_schedule() and
1456 not cn.is_equivalent_schedule(link_sched))):
1457 cn.schedule = link_sched
1458 cn.set_modified(True)
1460 # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1461 # NTDSCONN_OPT_USE_NOTIFY are set in cn
1462 if cn.is_override_notify_default() and \
1463 cn.is_use_notify():
1465 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
1466 # ri.Options
1467 # Perform an originating update to clear bits
1468 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1469 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1470 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
1471 cn.options &= \
1472 ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1473 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1474 cn.set_modified(True)
1476 # ELSE
1477 else:
1479 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
1480 # ri.Options
1481 # Perform an originating update to set bits
1482 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1483 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1484 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1485 cn.options |= \
1486 (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1487 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1488 cn.set_modified(True)
1490 # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
1491 if cn.is_twoway_sync():
1493 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
1494 # ri.Options
1495 # Perform an originating update to clear bit
1496 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1497 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
1498 cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1499 cn.set_modified(True)
1501 # ELSE
1502 else:
1504 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
1505 # ri.Options
1506 # Perform an originating update to set bit
1507 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1508 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1509 cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1510 cn.set_modified(True)
1512 # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
1513 # in cn!options
1514 if cn.is_intersite_compression_disabled():
1516 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
1517 # in ri.Options
1518 # Perform an originating update to clear bit
1519 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1520 # cn!options
1521 if ((link_opt &
1522 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0):
1523 cn.options &= \
1524 ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1525 cn.set_modified(True)
1527 # ELSE
1528 else:
1529 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1530 # ri.Options
1531 # Perform an originating update to set bit
1532 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1533 # cn!options
1534 if ((link_opt &
1535 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
1536 cn.options |= \
1537 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1538 cn.set_modified(True)
1540 # Display any modified connection
1541 if opts.readonly:
1542 if cn.to_be_modified:
1543 logger.info("TO BE MODIFIED:\n%s" % cn)
1545 ldsa.commit_connections(self.samdb, ro=True)
1546 else:
1547 ldsa.commit_connections(self.samdb)
1548 # ENDFOR
1550 valid_connections = 0
1552 # FOR each nTDSConnection object cn such that cn!parent is
1553 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1554 for ldsa in lbhs_all:
1555 for cn in ldsa.connect_table.values():
1557 rdsa = rbh_table.get(cn.from_dnstr)
1558 if rdsa is None:
1559 continue
1561 DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr)
1563 # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
1564 # cn!transportType references t) and
1565 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
1566 if (((not cn.is_generated() or
1567 cn.transport_guid == transport.guid) and
1568 not cn.is_rodc_topology())):
1570 # LET rguid be the objectGUID of the nTDSDSA object
1571 # referenced by cn!fromServer
1572 # LET lguid be (cn!parent)!objectGUID
1574 # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
1575 # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
1576 # Increment cValidConnections by 1
1577 if ((not self.is_bridgehead_failed(rdsa, detect_failed) and
1578 not self.is_bridgehead_failed(ldsa, detect_failed))):
1579 valid_connections += 1
1581 # IF keepConnections does not contain cn!objectGUID
1582 # APPEND cn!objectGUID to keepConnections
1583 self.kept_connections.add(cn)
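# (Clarifying note) the spec's keepConnections holds objectGUIDs; this
# implementation keeps the NTDSConnection objects themselves, which is
# what remove_unneeded_ntdsconn() later checks membership against.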
1585 # ENDFOR
1586 DEBUG_RED("valid connections %d" % valid_connections)
1587 DEBUG("kept_connections:\n%s" % (self.kept_connections,))
1588 # IF cValidConnections = 0
1589 if valid_connections == 0:
1591 # LET opt be NTDSCONN_OPT_IS_GENERATED
1592 opt = dsdb.NTDSCONN_OPT_IS_GENERATED
1594 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
1595 # SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1596 # NTDSCONN_OPT_USE_NOTIFY in opt
1597 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1598 opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1599 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1601 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
1602 # SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
1603 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1604 opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1606 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1607 # ri.Options
1608 # SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
1609 if ((link_opt &
1610 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
1611 opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1613 # Perform an originating update to create a new nTDSConnection
1614 # object cn that is a child of lbh, cn!enabledConnection = TRUE,
1615 # cn!options = opt, cn!transportType is a reference to t,
1616 # cn!fromServer is a reference to rbh, and cn!schedule = sch
1617 DEBUG_FN("new connection, KCC dsa: %s" % self.my_dsa.dsa_dnstr)
1618 cn = lbh.new_connection(opt, 0, transport,
1619 rbh.dsa_dnstr, link_sched)
1621 # Display any added connection
1622 if opts.readonly:
1623 if cn.to_be_added:
1624 logger.info("TO BE ADDED:\n%s" % cn)
1626 lbh.commit_connections(self.samdb, ro=True)
1627 else:
1628 lbh.commit_connections(self.samdb)
1630 # APPEND cn!objectGUID to keepConnections
1631 self.kept_connections.add(cn)
1633 def add_transports(self, vertex, local_vertex, graph, detect_failed):
1634 """Build a Vertex's transport lists
1636 Each vertex has accept_red_red and accept_black lists that
1637 list what transports they accept under various conditions. The
1638 only transports that are ever accepted are IP and a dummy extra
1639 transport called "EDGE_TYPE_ALL".
1641 Part of MS-ADTS 6.2.2.3.4.3 -- ColorVertices
1643 :param vertex: the remote vertex we are thinking about
1644 :param local_vertex: the vertex relating to the local site.
1645 :param graph: the intersite graph
1646 :param detect_failed: whether to detect failed links
1647 :return: True if some bridgeheads were not found
1648 """
1649 # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
1650 # here, but using vertex seems to make more sense. That is,
1651 # the docs want this:
1653 #bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1654 # local_vertex.is_black(), detect_failed)
1656 # TODO WHY?????
1658 vertex.accept_red_red = []
1659 vertex.accept_black = []
1660 found_failed = False
1661 for t_guid, transport in self.transport_table.items():
1662 if transport.name != 'IP':
1663 #XXX well this is cheating a bit
1664 logging.warning("WARNING: we are ignoring a transport named %r"
1665 % transport.name)
1666 continue
1668 # FLAG_CR_NTDS_DOMAIN 0x00000002
1669 if ((vertex.is_red() and transport.name != "IP" and
1670 vertex.part.system_flags & 0x00000002)):
1671 continue
1673 if vertex not in graph.connected_vertices:
1674 continue
1676 partial_replica_okay = vertex.is_black()
1677 bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1678 partial_replica_okay, detect_failed)
1679 if bh is None:
1680 found_failed = True
1681 continue
1683 vertex.accept_red_red.append(t_guid)
1684 vertex.accept_black.append(t_guid)
1686 # Add additional transport to allow another run of Dijkstra
1687 vertex.accept_red_red.append("EDGE_TYPE_ALL")
1688 vertex.accept_black.append("EDGE_TYPE_ALL")
1690 return found_failed
1692 def create_connections(self, graph, part, detect_failed):
1693 """Construct an NC replica graph for the NC identified by
1694 the given crossRef, then create any additional nTDSConnection
1695 objects required.
1697 :param graph: site graph.
1698 :param part: crossRef object for NC.
1699 :param detect_failed: True to detect failed DCs and route
1700 replication traffic around them, False to assume no DC
1701 has failed.
1703 Modifies self.kept_connections by adding any connections
1704 deemed to be "in use".
1706 ::returns: (all_connected, found_failed_dc)
1707 (all_connected) True if the resulting NC replica graph
1708 connects all sites that need to be connected.
1709 (found_failed_dc) True if one or more failed DCs were
1710 detected.
1711 """
1712 all_connected = True
1713 found_failed = False
1715 logger.debug("create_connections(): enter\n"
1716 "\tpartdn=%s\n\tdetect_failed=%s" %
1717 (part.nc_dnstr, detect_failed))
1719 # XXX - This is a highly abbreviated function from the MS-TECH
1720 # ref. It creates connections between bridgeheads for all
1721 # sites that have appropriate replicas. Thus we are not
1722 # creating a minimum cost spanning tree but instead
1723 # producing a fully connected graph. This should produce
1724 # a full (albeit not optimal cost) replication topology.
1726 my_vertex = Vertex(self.my_site, part)
1727 my_vertex.color_vertex()
1729 for v in graph.vertices:
1730 v.color_vertex()
1731 if self.add_transports(v, my_vertex, graph, False):
1732 found_failed = True
1734 # No NC replicas for this NC in the site of the local DC,
1735 # so no nTDSConnection objects need be created
1736 if my_vertex.is_white():
1737 return all_connected, found_failed
1739 edge_list, n_components = get_spanning_tree_edges(graph,
1740 self.my_site,
1741 label=part.partstr)
1743 logger.debug("%s Number of components: %d" %
1744 (part.nc_dnstr, n_components))
1745 if n_components > 1:
1746 all_connected = False
1748 # LET partialReplicaOkay be TRUE if and only if
1749 # localSiteVertex.Color = COLOR.BLACK
1750 partial_ok = my_vertex.is_black()
1752 # Utilize the IP transport only for now
1753 transport = self.ip_transport
1755 DEBUG("edge_list %s" % edge_list)
1756 for e in edge_list:
1757 # XXX more accurate comparison?
1758 if e.directed and e.vertices[0].site is self.my_site:
1759 continue
1761 if e.vertices[0].site is self.my_site:
1762 rsite = e.vertices[1].site
1763 else:
1764 rsite = e.vertices[0].site
1766 # We don't make connections to our own site as that
1767 # is the intrasite topology generator's job
1768 if rsite is self.my_site:
1769 DEBUG("rsite is my_site")
1770 continue
1772 # Determine bridgehead server in remote site
1773 rbh = self.get_bridgehead(rsite, part, transport,
1774 partial_ok, detect_failed)
1775 if rbh is None:
1776 continue
1778 # An RODC acts as a BH for itself
1779 # IF AmIRODC() then
1780 # LET lbh be the nTDSDSA object of the local DC
1781 # ELSE
1782 # LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
1783 # cr, t, partialReplicaOkay, detectFailedDCs)
1784 if self.my_dsa.is_ro():
1785 lsite = self.my_site
1786 lbh = self.my_dsa
1787 else:
1788 lsite = self.my_site
1789 lbh = self.get_bridgehead(lsite, part, transport,
1790 partial_ok, detect_failed)
1791 # TODO
1792 if lbh is None:
1793 DEBUG_RED("DISASTER! lbh is None")
1794 return False, True
1796 DEBUG_CYAN("SITES")
1797 print lsite, rsite
1798 DEBUG_BLUE("vertices")
1799 print e.vertices
1800 DEBUG_BLUE("bridgeheads")
1801 print lbh, rbh
1802 DEBUG_BLUE("-" * 70)
1804 sitelink = e.site_link
1805 if sitelink is None:
1806 link_opt = 0x0
1807 link_sched = None
1808 else:
1809 link_opt = sitelink.options
1810 link_sched = sitelink.schedule
1812 self.create_connection(part, rbh, rsite, transport,
1813 lbh, lsite, link_opt, link_sched,
1814 partial_ok, detect_failed)
1816 return all_connected, found_failed
1818 def create_intersite_connections(self):
1819 """Computes an NC replica graph for each NC replica that "should be
1820 present" on the local DC or "is present" on any DC in the same site
1821 as the local DC. For each edge directed to an NC replica on such a
1822 DC from an NC replica on a DC in another site, the KCC creates an
1823 nTDSConnection object to imply that edge if one does not already
1824 exist.
1826 Modifies self.kept_connections - A set of nTDSConnection
1827 objects for edges that are directed
1828 to the local DC's site in one or more NC replica graphs.
1830 returns: True if spanning trees were created for all NC replica
1831 graphs, otherwise False.
1832 """
1833 all_connected = True
1834 self.kept_connections = set()
1836 # LET crossRefList be the set containing each object o of class
1837 # crossRef such that o is a child of the CN=Partitions child of the
1838 # config NC
1840 # FOR each crossRef object cr in crossRefList
1841 # IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
1842 # is clear in cr!systemFlags, skip cr.
1843 # LET g be the GRAPH return of SetupGraph()
1845 for part in self.part_table.values():
1847 if not part.is_enabled():
1848 continue
1850 if part.is_foreign():
1851 continue
1853 graph = self.setup_graph(part)
1855 # Create nTDSConnection objects, routing replication traffic
1856 # around "failed" DCs.
1857 found_failed = False
1859 connected, found_failed = self.create_connections(graph,
1860 part, True)
1862 DEBUG("with detect_failed: connected %s Found failed %s" %
1863 (connected, found_failed))
1864 if not connected:
1865 all_connected = False
1867 if found_failed:
1868 # One or more failed DCs preclude use of the ideal NC
1869 # replica graph. Add connections for the ideal graph.
1870 self.create_connections(graph, part, False)
1872 return all_connected
1874 def intersite(self):
1875 """The head method for generating the inter-site KCC replica
1876 connection graph and attendant nTDSConnection objects
1877 in the samdb.
1879 Produces self.kept_connections set of NTDS Connections
1880 that should be kept during subsequent pruning process.
1882 ::return (True or False): (True) if the produced NC replica
1883 graph connects all sites that need to be connected
1884 """
1886 # Retrieve my DSA
1887 mydsa = self.my_dsa
1888 mysite = self.my_site
1889 all_connected = True
1891 logger.debug("intersite(): enter")
1893 # Determine who is the ISTG
1894 if opts.readonly:
1895 mysite.select_istg(self.samdb, mydsa, ro=True)
1896 else:
1897 mysite.select_istg(self.samdb, mydsa, ro=False)
1899 # Test whether local site has topology disabled
1900 if mysite.is_intersite_topology_disabled():
1901 logger.debug("intersite(): exit disabled all_connected=%d" %
1902 all_connected)
1903 return all_connected
1905 if not mydsa.is_istg():
1906 logger.debug("intersite(): exit not istg all_connected=%d" %
1907 all_connected)
1908 return all_connected
1910 self.merge_failed_links()
1912 # For each NC with an NC replica that "should be present" on the
1913 # local DC or "is present" on any DC in the same site as the
1914 # local DC, the KCC constructs a site graph--a precursor to an NC
1915 # replica graph. The site connectivity for a site graph is defined
1916 # by objects of class interSiteTransport, siteLink, and
1917 # siteLinkBridge in the config NC.
1919 all_connected = self.create_intersite_connections()
1921 logger.debug("intersite(): exit all_connected=%d" % all_connected)
1922 return all_connected
1924 def update_rodc_connection(self):
1925 """Runs when the local DC is an RODC and updates the RODC NTFRS
1926 connection object.
1927 """
1928 # Given an nTDSConnection object cn1, such that cn1.options contains
1929 # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2,
1930 # such that cn2.options does not contain NTDSCONN_OPT_RODC_TOPOLOGY,
1931 # modify cn1 to ensure that the following is true:
1933 # cn1.fromServer = cn2.fromServer
1934 # cn1.schedule = cn2.schedule
1936 # If no such cn2 can be found, cn1 is not modified.
1937 # If no such cn1 can be found, nothing is modified by this task.
1939 if not self.my_dsa.is_ro():
1940 return
1942 all_connections = self.my_dsa.connect_table.values()
1943 ro_connections = [x for x in all_connections if x.is_rodc_topology()]
1944 rw_connections = [x for x in all_connections
1945 if x not in ro_connections]
1947 # XXX here we are dealing with multiple RODC_TOPO connections,
1948 # if they exist. It is not clear whether the spec means that
1949 # or if it ever arises.
1950 if rw_connections and ro_connections:
1951 for con in ro_connections:
1952 cn2 = rw_connections[0]
1953 con.from_dnstr = cn2.from_dnstr
1954 con.schedule = cn2.schedule
1955 con.to_be_modified = True
1957 self.my_dsa.commit_connections(self.samdb, ro=opts.readonly)
1959 def intrasite_max_node_edges(self, node_count):
1960 """Returns the maximum number of edges directed to a node in
1961 the intrasite replica graph.
1963 The KCC does not create more
1964 than 50 edges directed to a single DC. To optimize replication,
1965 we compute that each node should have n+2 total edges directed
1966 to it such that (n) is the smallest non-negative integer
1967 satisfying (node_count <= 2*(n*n) + 6*n + 7)
1969 (If the number of edges is m (i.e. n + 2), that is the same as
1970 2 * m*m - 2 * m + 3).
1972 edges n nodecount
1973 2 0 7
1974 3 1 15
1975 4 2 27
1976 5 3 43
1978 50 48 4903
1980 :param node_count: total number of nodes in the replica graph
1982 The intention is that there should be no more than 3 hops
1983 between any two DSAs at a site. With up to 7 nodes the 2 edges
1984 of the ring are enough; any configuration of extra edges with
1985 8 nodes will be enough. It is less clear that the 3 hop
1986 guarantee holds at e.g. 15 nodes in degenerate cases, but
1987 those are quite unlikely given the extra edges are randomly
1988 arranged.
1989 """
1990 n = 0
1991 while True:
1992 if node_count <= (2 * (n * n) + (6 * n) + 7):
1993 break
1994 n = n + 1
1995 n = n + 2
1996 if n < 50:
1997 return n
1998 return 50
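# Worked example (illustrative only; assumes a KCC instance named kcc):
# for a site contributing 20 replicas, node_count == 20 and n == 2 is the
# smallest n with 2*n*n + 6*n + 7 == 27 >= 20, so
#
#   kcc.intrasite_max_node_edges(20)   # -> 4  (i.e. n + 2)
#
# meaning at most 4 edges may be directed at any one node of that graph.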
2000 def construct_intrasite_graph(self, site_local, dc_local,
2001 nc_x, gc_only, detect_stale):
2002 # [MS-ADTS] 6.2.2.2
2003 # We're using the MS notation names here to allow
2004 # correlation back to the published algorithm.
2006 # nc_x - naming context (x) that we are testing if it
2007 # "should be present" on the local DC
2008 # f_of_x - replica (f) found on a DC (s) for NC (x)
2009 # dc_s - DC where f_of_x replica was found
2010 # dc_local - local DC that potentially needs a replica
2011 # (f_of_x)
2012 # r_list - replica list R
2013 # p_of_x - replica (p) is partial and found on a DC (s)
2014 # for NC (x)
2015 # l_of_x - replica (l) is the local replica for NC (x)
2016 # that should appear on the local DC
2017 # r_len - length of replica list |R|
2019 # If the DSA doesn't need a replica for this
2020 # partition (NC x) then continue
2021 needed, ro, partial = nc_x.should_be_present(dc_local)
2023 DEBUG_YELLOW("construct_intrasite_graph(): enter" +
2024 "\n\tgc_only=%d" % gc_only +
2025 "\n\tdetect_stale=%d" % detect_stale +
2026 "\n\tneeded=%s" % needed +
2027 "\n\tro=%s" % ro +
2028 "\n\tpartial=%s" % partial +
2029 "\n%s" % nc_x)
2031 if not needed:
2032 DEBUG_RED("%s lacks 'should be present' status, "
2033 "aborting construct_intersite_graph!" %
2034 nc_x.nc_dnstr)
2035 return
2037 # Create a NCReplica that matches what the local replica
2038 # should say. We'll use this below in our r_list
2039 l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid,
2040 nc_x.nc_dnstr)
2042 l_of_x.identify_by_basedn(self.samdb)
2044 l_of_x.rep_partial = partial
2045 l_of_x.rep_ro = ro
2047 # Add this replica that "should be present" to the
2048 # needed replica table for this DSA
2049 dc_local.add_needed_replica(l_of_x)
2051 # Replica list
2053 # Let R be a sequence containing each writable replica f of x
2054 # such that f "is present" on a DC s satisfying the following
2055 # criteria:
2057 # * s is a writable DC other than the local DC.
2059 # * s is in the same site as the local DC.
2061 # * If x is a read-only full replica and x is a domain NC,
2062 # then the DC's functional level is at least
2063 # DS_BEHAVIOR_WIN2008.
2065 # * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
2066 # in the options attribute of the site settings object for
2067 # the local DC's site, or no tuple z exists in the
2068 # kCCFailedLinks or kCCFailedConnections variables such
2069 # that z.UUIDDsa is the objectGUID of the nTDSDSA object
2070 # for s, z.FailureCount > 0, and the current time -
2071 # z.TimeFirstFailure > 2 hours.
2073 r_list = []
2075 # We'll loop thru all the DSAs looking for
2076 # writeable NC replicas that match the naming
2077 # context dn for (nc_x)
2079 for dc_s in self.my_site.dsa_table.values():
2080 # If this partition (nc_x) doesn't appear as a
2081 # replica (f_of_x) on (dc_s) then continue
2082 if nc_x.nc_dnstr not in dc_s.current_rep_table:
2083 continue
2085 # Pull out the NCReplica (f) of (x) with the dn
2086 # that matches NC (x) we are examining.
2087 f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2089 # Replica (f) of NC (x) must be writable
2090 if f_of_x.is_ro():
2091 continue
2093 # Replica (f) of NC (x) must satisfy the
2094 # "is present" criteria for DC (s) that
2095 # it was found on
2096 if not f_of_x.is_present():
2097 continue
2099 # DC (s) must be a writable DSA other than
2100 # my local DC. In other words we'd only replicate
2101 # from another writable DC
2102 if dc_s.is_ro() or dc_s is dc_local:
2103 continue
2105 # Certain replica graphs are produced only
2106 # for global catalogs, so test against
2107 # method input parameter
2108 if gc_only and not dc_s.is_gc():
2109 continue
2111 # DC (s) must be in the same site as the local DC
2112 # as this is the intra-site algorithm. This is
2113 # handled by virtue of placing DSAs in per
2114 # site objects (see enclosing for() loop)
2116 # If NC (x) is intended to be read-only full replica
2117 # for a domain NC on the target DC then the source
2118 # DC should have functional level at minimum WIN2008
2120 # Effectively we're saying that in order to replicate
2121 # to a targeted RODC (which was introduced in Windows 2008)
2122 # then we have to replicate from a DC that is also minimally
2123 # at that level.
2125 # You can also see this requirement in the MS special
2126 # considerations for RODC which state that to deploy
2127 # an RODC, at least one writable domain controller in
2128 # the domain must be running Windows Server 2008
2129 if ro and not partial and nc_x.nc_type == NCType.domain:
2130 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
2131 continue
2133 # If we haven't been told to turn off stale connection
2134 # detection and this dsa has a stale connection then
2135 # continue
2136 if detect_stale and self.is_stale_link_connection(dc_s):
2137 continue
2139 # Replica meets criteria. Add it to table indexed
2140 # by the GUID of the DC that it appears on
2141 r_list.append(f_of_x)
2143 # If a partial (not full) replica of NC (x) "should be present"
2144 # on the local DC, append to R each partial replica (p of x)
2145 # such that p "is present" on a DC satisfying the same
2146 # criteria defined above for full replica DCs.
2148 # XXX This loop and the previous one differ only in whether
2149 # the replica is partial or not. Here we only accept partial
2150 # (because we're partial); before we only accepted full. Order
2151 # doesn't matter (the list is sorted a few lines down) so these
2152 # loops could easily be merged. Or this could be a helper
2153 # function.
2155 if partial:
2156 # Now we loop thru all the DSAs looking for
2157 # partial NC replicas that match the naming
2158 # context dn for (NC x)
2159 for dc_s in self.my_site.dsa_table.values():
2161 # If this partition NC (x) doesn't appear as a
2162 # replica (p) of NC (x) on the dsa DC (s) then
2163 # continue
2164 if nc_x.nc_dnstr not in dc_s.current_rep_table:
2165 continue
2167 # Pull out the NCReplica with the dn that
2168 # matches NC (x) we are examining.
2169 p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2171 # Replica (p) of NC (x) must be partial
2172 if not p_of_x.is_partial():
2173 continue
2175 # Replica (p) of NC (x) must satisfy the
2176 # "is present" criteria for DC (s) that
2177 # it was found on
2178 if not p_of_x.is_present():
2179 continue
2181 # DC (s) must be a writable DSA other than
2182 # my DSA. In other words we'd only replicate
2183 # from another writable DSA
2184 if dc_s.is_ro() or dc_s is dc_local:
2185 continue
2187 # Certain replica graphs are produced only
2188 # for global catalogs, so test against
2189 # method input parameter
2190 if gc_only and not dc_s.is_gc():
2191 continue
2193 # If we haven't been told to turn off stale connection
2194 # detection and this dsa has a stale connection then
2195 # continue
2196 if detect_stale and self.is_stale_link_connection(dc_s):
2197 continue
2199 # Replica meets criteria. Add it to table indexed
2200 # by the GUID of the DSA that it appears on
2201 r_list.append(p_of_x)
2203 # Append to R the NC replica that "should be present"
2204 # on the local DC
2205 r_list.append(l_of_x)
2207 r_list.sort(sort_replica_by_dsa_guid)
2208 r_len = len(r_list)
2210 max_node_edges = self.intrasite_max_node_edges(r_len)
2212 # Add a node for each r_list element to the replica graph
2213 graph_list = []
2214 for rep in r_list:
2215 node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
2216 graph_list.append(node)
2218 # For each r(i) from (0 <= i < |R|-1)
2219 i = 0
2220 while i < (r_len-1):
2221 # Add an edge from r(i) to r(i+1) if r(i) is a full
2222 # replica or r(i+1) is a partial replica
2223 if not r_list[i].is_partial() or r_list[i+1].is_partial():
2224 graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr)
2226 # Add an edge from r(i+1) to r(i) if r(i+1) is a full
2227 # replica or ri is a partial replica.
2228 if not r_list[i+1].is_partial() or r_list[i].is_partial():
2229 graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr)
2230 i = i + 1
2232 # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
2233 # or r0 is a partial replica.
2234 if not r_list[r_len-1].is_partial() or r_list[0].is_partial():
2235 graph_list[0].add_edge_from(r_list[r_len-1].rep_dsa_dnstr)
2237 # Add an edge from r0 to r|R|-1 if r0 is a full replica or
2238 # r|R|-1 is a partial replica.
2239 if not r_list[0].is_partial() or r_list[r_len-1].is_partial():
2240 graph_list[r_len-1].add_edge_from(r_list[0].rep_dsa_dnstr)
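# Illustrative sketch: with R == [r0, r1, r2] and all three replicas
# full, the loop above plus these wrap-around edges produce the directed
# double ring r0 <-> r1 <-> r2 <-> r0.  A partial replica still receives
# edges from both ring neighbours, but it only sources an edge towards a
# neighbour that is itself partial.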
2242 DEBUG("r_list is length %s" % len(r_list))
2243 DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr))
2244 for x in r_list))
2246 do_dot_files = opts.dot_files and opts.debug
2247 if opts.verify or do_dot_files:
2248 dot_edges = []
2249 dot_vertices = set()
2250 for v1 in graph_list:
2251 dot_vertices.add(v1.dsa_dnstr)
2252 for v2 in v1.edge_from:
2253 dot_edges.append((v2, v1.dsa_dnstr))
2254 dot_vertices.add(v2)
2256 verify_properties = ('connected', 'directed_double_ring_or_small')
2257 verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices,
2258 label='%s__%s__%s' % (site_local.site_dnstr,
2259 nctype_lut[nc_x.nc_type],
2260 nc_x.nc_dnstr),
2261 properties=verify_properties, debug=DEBUG,
2262 verify=opts.verify,
2263 dot_files=do_dot_files, directed=True)
2265 # For each existing nTDSConnection object implying an edge
2266 # from rj of R to ri such that j != i, an edge from rj to ri
2267 # is not already in the graph, and the total edges directed
2268 # to ri is less than n+2, the KCC adds that edge to the graph.
2269 for vertex in graph_list:
2270 dsa = self.my_site.dsa_table[vertex.dsa_dnstr]
2271 for connect in dsa.connect_table.values():
2272 remote = connect.from_dnstr
2273 if remote in self.my_site.dsa_table:
2274 vertex.add_edge_from(remote)
2276 DEBUG('reps are: %s' % ' '.join(x.rep_dsa_dnstr for x in r_list))
2277 DEBUG('dsas are: %s' % ' '.join(x.dsa_dnstr for x in graph_list))
2279 for tnode in graph_list:
2280 # To optimize replication latency in sites with many NC
2281 # replicas, the KCC adds new edges directed to ri to bring
2282 # the total edges to n+2, where the NC replica rk of R
2283 # from which the edge is directed is chosen at random such
2284 # that k != i and an edge from rk to ri is not already in
2285 # the graph.
2287 # Note that the KCC tech ref does not give a number for
2288 # the definition of "sites with many NC replicas". At a
2289 # bare minimum to satisfy n+2 edges directed at a node we
2290 # have to have at least three replicas in |R| (i.e. if n
2291 # is zero then at least replicas from two other graph
2292 # nodes may direct edges to us).
2293 if r_len >= 3 and not tnode.has_sufficient_edges():
2294 candidates = [x for x in graph_list if
2295 (x is not tnode and
2296 x.dsa_dnstr not in tnode.edge_from)]
2298 DEBUG_BLUE("looking for random link for %s. r_len %d, "
2299 "graph len %d candidates %d"
2300 % (tnode.dsa_dnstr, r_len, len(graph_list),
2301 len(candidates)))
2303 DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates])
2305 while candidates and not tnode.has_sufficient_edges():
2306 other = random.choice(candidates)
2307 DEBUG("trying to add candidate %s" % other.dsa_dstr)
2308 if not tnode.add_edge_from(other):
2309 DEBUG_RED("could not add %s" % other.dsa_dstr)
2310 candidates.remove(other)
2311 else:
2312 DEBUG_FN("not adding links to %s: nodes %s, links is %s/%s" %
2313 (tnode.dsa_dnstr, r_len, len(tnode.edge_from),
2314 tnode.max_edges))
2316 # Print the graph node in debug mode
2317 logger.debug("%s" % tnode)
2319 # For each edge directed to the local DC, ensure a nTDSConnection
2320 # points to us that satisfies the KCC criteria
2322 if tnode.dsa_dnstr == dc_local.dsa_dnstr:
2323 tnode.add_connections_from_edges(dc_local)
2325 if opts.verify or do_dot_files:
2326 dot_edges = []
2327 dot_vertices = set()
2328 for v1 in graph_list:
2329 dot_vertices.add(v1.dsa_dnstr)
2330 for v2 in v1.edge_from:
2331 dot_edges.append((v2, v1.dsa_dnstr))
2332 dot_vertices.add(v2)
2334 verify_properties = ('connected', 'directed_double_ring_or_small')
2335 verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices,
2336 label='%s__%s__%s' % (site_local.site_dnstr,
2337 nctype_lut[nc_x.nc_type],
2338 nc_x.nc_dnstr),
2339 properties=verify_properties, debug=DEBUG,
2340 verify=opts.verify,
2341 dot_files=do_dot_files, directed=True)
2343 def intrasite(self):
2344 """The head method for generating the intra-site KCC replica
2345 connection graph and attendant nTDSConnection objects
2346 in the samdb
2347 """
2348 # Retrieve my DSA
2349 mydsa = self.my_dsa
2351 logger.debug("intrasite(): enter")
2353 # Test whether local site has topology disabled
2354 mysite = self.my_site
2355 if mysite.is_intrasite_topology_disabled():
2356 return
2358 detect_stale = (not mysite.is_detect_stale_disabled())
2359 for connect in mydsa.connect_table.values():
2360 if connect.to_be_added:
2361 DEBUG_CYAN("TO BE ADDED:\n%s" % connect)
2363 # Loop thru all the partitions, with gc_only False
2364 for partdn, part in self.part_table.items():
2365 self.construct_intrasite_graph(mysite, mydsa, part, False,
2366 detect_stale)
2367 for connect in mydsa.connect_table.values():
2368 if connect.to_be_added:
2369 DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
2371 # If the DC is a GC server, the KCC constructs an additional NC
2372 # replica graph (and creates nTDSConnection objects) for the
2373 # config NC as above, except that only NC replicas that "are present"
2374 # on GC servers are added to R.
2375 for connect in mydsa.connect_table.values():
2376 if connect.to_be_added:
2377 DEBUG_YELLOW("TO BE ADDED:\n%s" % connect)
2379 # Do it again, with gc_only True
2380 for partdn, part in self.part_table.items():
2381 if part.is_config():
2382 self.construct_intrasite_graph(mysite, mydsa, part, True,
2383 detect_stale)
2385 # The DC repeats the NC replica graph computation and nTDSConnection
2386 # creation for each of the NC replica graphs, this time assuming
2387 # that no DC has failed. It does so by re-executing the steps as
2388 # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
2389 # set in the options attribute of the site settings object for
2390 # the local DC's site. (i.e. we set the "detect_stale" flag to False)
2391 for connect in mydsa.connect_table.values():
2392 if connect.to_be_added:
2393 DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
2395 # Loop thru all the partitions.
2396 for partdn, part in self.part_table.items():
2397 self.construct_intrasite_graph(mysite, mydsa, part, False,
2398 False) # don't detect stale
2400 # If the DC is a GC server, the KCC constructs an additional NC
2401 # replica graph (and creates nTDSConnection objects) for the
2402 # config NC as above, except that only NC replicas that "are present"
2403 # on GC servers are added to R.
2404 for connect in mydsa.connect_table.values():
2405 if connect.to_be_added:
2406 DEBUG_RED("TO BE ADDED:\n%s" % connect)
2408 for partdn, part in self.part_table.items():
2409 if part.is_config():
2410 self.construct_intrasite_graph(mysite, mydsa, part, True,
2411 False) # don't detect stale
2413 if opts.readonly:
2414 # Display any to be added or modified repsFrom
2415 for connect in mydsa.connect_table.values():
2416 if connect.to_be_deleted:
2417 logger.info("TO BE DELETED:\n%s" % connect)
2418 if connect.to_be_modified:
2419 logger.info("TO BE MODIFIED:\n%s" % connect)
2420 if connect.to_be_added:
2421 DEBUG_GREEN("TO BE ADDED:\n%s" % connect)
2423 mydsa.commit_connections(self.samdb, ro=True)
2424 else:
2425 # Commit any newly created connections to the samdb
2426 mydsa.commit_connections(self.samdb)
2428 def list_dsas(self):
2429 """Compile a comprehensive list of DSA DNs
2431 These are all the DSAs on all the sites that KCC would be
2432 dealing with.
2434 This method is not idempotent and may not work correctly in
2435 sequence with KCC.run().
2437 :return: a list of DSA DN strings.
2438 """
2439 self.load_my_site()
2440 self.load_my_dsa()
2442 self.load_all_sites()
2443 self.load_all_partitions()
2444 self.load_all_transports()
2445 self.load_all_sitelinks()
2446 dsas = []
2447 for site in self.site_table.values():
2448 dsas.extend([dsa.dsa_dnstr.replace('CN=NTDS Settings,', '', 1)
2449 for dsa in site.dsa_table.values()])
2450 return dsas
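# Example (hypothetical DNs, for illustration): the replace() above turns
# an nTDSDSA DN such as
#   CN=NTDS Settings,CN=DC1,CN=Servers,CN=Default-First-Site-Name,...
# into the server DN
#   CN=DC1,CN=Servers,CN=Default-First-Site-Name,...
# which is the form --forced-local-dsa takes (run() adds the
# "CN=NTDS Settings," prefix back on).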
2452 def load_samdb(self, dburl, lp, creds):
2453 """Load the database using an url, loadparm, and credentials
2455 :param dburl: a database url.
2456 :param lp: a loadparm object.
2457 :param cred: a Credentials object.
2459 self.samdb = SamDB(url=dburl,
2460 session_info=system_session(),
2461 credentials=creds, lp=lp)
2463 def plot_all_connections(self, basename, verify_properties=()):
2464 verify = verify_properties and opts.verify
2465 plot = opts.dot_files
2466 if not (verify or plot):
2467 return
2469 dot_edges = []
2470 dot_vertices = []
2471 edge_colours = []
2472 vertex_colours = []
2474 for dsa in self.dsa_by_dnstr.values():
2475 dot_vertices.append(dsa.dsa_dnstr)
2476 if dsa.is_ro():
2477 vertex_colours.append('#cc0000')
2478 else:
2479 vertex_colours.append('#0000cc')
2480 for con in dsa.connect_table.values():
2481 if con.is_rodc_topology():
2482 edge_colours.append('red')
2483 else:
2484 edge_colours.append('blue')
2485 dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))
2487 verify_and_dot(basename, dot_edges, vertices=dot_vertices,
2488 label=self.my_dsa_dnstr, properties=verify_properties,
2489 debug=DEBUG, verify=verify, dot_files=plot,
2490 directed=True, edge_colors=edge_colours,
2491 vertex_colors=vertex_colours)
2493 def run(self, dburl, lp, creds, forced_local_dsa=None,
2494 forget_local_links=False, forget_intersite_links=False):
2495 """Method to perform a complete run of the KCC and
2496 produce an updated topology for subsequent NC replica
2497 synchronization between domain controllers
2498 """
2499 # We may already have a samdb setup if we are
2500 # currently importing an ldif for a test run
2501 if self.samdb is None:
2502 try:
2503 self.load_samdb(dburl, lp, creds)
2504 except ldb.LdbError, (num, msg):
2505 logger.error("Unable to open sam database %s : %s" %
2506 (dburl, msg))
2507 return 1
2509 if forced_local_dsa:
2510 self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" %
2511 forced_local_dsa)
2513 try:
2514 # Setup
2515 self.load_my_site()
2516 self.load_my_dsa()
2518 self.load_all_sites()
2519 self.load_all_partitions()
2520 self.load_all_transports()
2521 self.load_all_sitelinks()
2523 if opts.verify or opts.dot_files:
2524 guid_to_dnstr = {}
2525 for site in self.site_table.values():
2526 guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
2527 for dnstr, dsa
2528 in site.dsa_table.items())
2530 self.plot_all_connections('dsa_initial')
2532 dot_edges = []
2533 current_reps, needed_reps = self.my_dsa.get_rep_tables()
2534 for dnstr, c_rep in current_reps.items():
2535 DEBUG("c_rep %s" % c_rep)
2536 dot_edges.append((self.my_dsa.dsa_dnstr, dnstr))
2538 verify_and_dot('dsa_repsFrom_initial', dot_edges,
2539 directed=True, label=self.my_dsa_dnstr,
2540 properties=(), debug=DEBUG, verify=opts.verify,
2541 dot_files=opts.dot_files)
2543 dot_edges = []
2544 for site in self.site_table.values():
2545 for dsa in site.dsa_table.values():
2546 current_reps, needed_reps = dsa.get_rep_tables()
2547 for dn_str, rep in current_reps.items():
2548 for reps_from in rep.rep_repsFrom:
2549 DEBUG("rep %s" % rep)
2550 dsa_guid = str(reps_from.source_dsa_obj_guid)
2551 dsa_dn = guid_to_dnstr[dsa_guid]
2552 dot_edges.append((dsa.dsa_dnstr, dsa_dn))
2554 verify_and_dot('dsa_repsFrom_initial_all', dot_edges,
2555 directed=True, label=self.my_dsa_dnstr,
2556 properties=(), debug=DEBUG, verify=opts.verify,
2557 dot_files=opts.dot_files)
2559 dot_edges = []
2560 for link in self.sitelink_table.values():
2561 for a, b in itertools.combinations(link.site_list, 2):
2562 dot_edges.append((str(a), str(b)))
2563 properties = ('connected',)
2564 verify_and_dot('dsa_sitelink_initial', dot_edges,
2565 directed=False,
2566 label=self.my_dsa_dnstr, properties=properties,
2567 debug=DEBUG, verify=opts.verify,
2568 dot_files=opts.dot_files)
2570 if forget_local_links:
2571 for dsa in self.my_site.dsa_table.values():
2572 dsa.connect_table = {k: v for k, v in
2573 dsa.connect_table.items()
2574 if v.is_rodc_topology()}
2575 self.plot_all_connections('dsa_forgotten_local')
2577 if forget_intersite_links:
2578 for site in self.site_table.values():
2579 for dsa in site.dsa_table.values():
2580 dsa.connect_table = {k: v for k, v in
2581 dsa.connect_table.items()
2582 if site is self.my_site and
2583 v.is_rodc_topology()}
2585 self.plot_all_connections('dsa_forgotten_all')
2586 # These are the published steps (in order) for the
2587 # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)
2589 # Step 1
2590 self.refresh_failed_links_connections()
2592 # Step 2
2593 self.intrasite()
2595 # Step 3
2596 all_connected = self.intersite()
2598 # Step 4
2599 self.remove_unneeded_ntdsconn(all_connected)
2601 # Step 5
2602 self.translate_ntdsconn()
2604 # Step 6
2605 self.remove_unneeded_failed_links_connections()
2607 # Step 7
2608 self.update_rodc_connection()
2610 if opts.verify or opts.dot_files:
2611 self.plot_all_connections('dsa_final',
2612 ('connected', 'forest_of_rings'))
2614 DEBUG_MAGENTA("there are %d dsa guids" % len(guid_to_dnstr))
2616 dot_edges = []
2617 edge_colors = []
2618 my_dnstr = self.my_dsa.dsa_dnstr
2619 current_reps, needed_reps = self.my_dsa.get_rep_tables()
2620 for dnstr, n_rep in needed_reps.items():
2621 for reps_from in n_rep.rep_repsFrom:
2622 guid_str = str(reps_from.source_dsa_obj_guid)
2623 dot_edges.append((my_dnstr, guid_to_dnstr[guid_str]))
2624 edge_colors.append('#' + str(n_rep.nc_guid)[:6])
2626 verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True,
2627 label=self.my_dsa_dnstr,
2628 properties=(), debug=DEBUG, verify=opts.verify,
2629 dot_files=opts.dot_files,
2630 edge_colors=edge_colors)
2632 dot_edges = []
2634 for site in self.site_table.values():
2635 for dsa in site.dsa_table.values():
2636 current_reps, needed_reps = dsa.get_rep_tables()
2637 for n_rep in needed_reps.values():
2638 for reps_from in n_rep.rep_repsFrom:
2639 dsa_guid = str(reps_from.source_dsa_obj_guid)
2640 dsa_dn = guid_to_dnstr[dsa_guid]
2641 dot_edges.append((dsa.dsa_dnstr, dsa_dn))
2643 verify_and_dot('dsa_repsFrom_final_all', dot_edges,
2644 directed=True, label=self.my_dsa_dnstr,
2645 properties=(), debug=DEBUG, verify=opts.verify,
2646 dot_files=opts.dot_files)
2648 except:
2649 raise
2651 return 0
2653 def import_ldif(self, dburl, lp, creds, ldif_file):
2654 """Import all objects and attributes that are relevent
2655 to the KCC algorithms from a previously exported LDIF file.
2657 The point of this function is to allow a programmer/debugger to
2658 import an LDIF file with non-security relevant information that
2659 was previously extracted from a DC database. The LDIF file is used
2660 to create a temporary abbreviated database. The KCC algorithm can
2661 then run against this abbreviated database for debug or test
2662 verification that the topology generated is computationally the
2663 same between different OSes and algorithms.
2665 :param dburl: path to the temporary abbreviated db to create
2666 :param ldif_file: path to the ldif file to import
2667 """
2668 try:
2669 self.samdb = ldif_utils.ldif_to_samdb(dburl, lp, ldif_file,
2670 opts.forced_local_dsa)
2671 except ldif_utils.LdifError, e:
2672 print e
2673 return 1
2674 return 0
2676 def export_ldif(self, dburl, lp, creds, ldif_file):
2677 """Routine to extract all objects and attributes that are relevent
2678 to the KCC algorithms from a DC database.
2680 The point of this function is to allow a programmer/debugger to
2681 extract an LDIF file with non-security relevant information from
2682 a DC database. The LDIF file can then be used to "import" via
2683 the import_ldif() function this file into a temporary abbreviated
2684 database. The KCC algorithm can then run against this abbreviated
2685 database for debug or test verification that the topology generated
2686 is computationally the same between different OSes and algorithms.
2688 :param dburl: LDAP database URL to extract info from
2689 :param ldif_file: output LDIF file name to create
2690 """
2691 try:
2692 ldif_utils.samdb_to_ldif_file(self.samdb, dburl, lp, creds,
2693 ldif_file)
2694 except ldif_utils.LdifError, e:
2695 print e
2696 return 1
2697 return 0
2699 ##################################################
2700 # Global Functions
2701 ##################################################
2704 def get_spanning_tree_edges(graph, my_site, label=None):
2705 # Phase 1: Run Dijkstra's to get a list of internal edges, which are
2706 # just the shortest-paths connecting colored vertices
2708 internal_edges = set()
2710 for e_set in graph.edge_set:
2711 edgeType = None
2712 for v in graph.vertices:
2713 v.edges = []
2715 # Every edge in an edge set has the same con_type
2716 for e in e_set.edges:
2717 edgeType = e.con_type
2718 for v in e.vertices:
2719 v.edges.append(e)
2721 if opts.verify or opts.dot_files:
2722 graph_edges = [(a.site.site_dnstr, b.site.site_dnstr)
2723 for a, b in
2724 itertools.chain(
2725 *(itertools.combinations(edge.vertices, 2)
2726 for edge in e_set.edges))]
2727 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2729 if opts.dot_files and opts.debug:
2730 write_dot_file('edgeset_%s' % (edgeType,), graph_edges,
2731 vertices=graph_nodes, label=label)
2733 if opts.verify:
2734 verify_graph('spanning tree edge set %s' % edgeType,
2735 graph_edges, vertices=graph_nodes,
2736 properties=('complete', 'connected'),
2737 debug=DEBUG)
2739 # Run dijkstra's algorithm with just the red vertices as seeds
2740 # Seed from the full replicas
2741 dijkstra(graph, edgeType, False)
2743 # Process edge set
2744 process_edge_set(graph, e_set, internal_edges)
2746 # Run dijkstra's algorithm with red and black vertices as the seeds
2747 # Seed from both full and partial replicas
2748 dijkstra(graph, edgeType, True)
2750 # Process edge set
2751 process_edge_set(graph, e_set, internal_edges)
2753 # All vertices have root/component as itself
2754 setup_vertices(graph)
2755 process_edge_set(graph, None, internal_edges)
2757 if opts.verify or opts.dot_files:
2758 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
2759 for e in internal_edges]
2760 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2761 verify_properties = ('multi_edge_forest',)
2762 verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label,
2763 properties=verify_properties, debug=DEBUG,
2764 verify=opts.verify,
2765 dot_files=opts.dot_files)
2767 # Phase 2: Run Kruskal's on the internal edges
2768 output_edges, components = kruskal(graph, internal_edges)
2770 # This recalculates the cost for the path connecting the
2771 # closest red vertex. Ignoring types is fine because NO
2772 # suboptimal edge should exist in the graph
2773 dijkstra(graph, "EDGE_TYPE_ALL", False) # TODO rename
2774 # Phase 3: Process the output
2775 for v in graph.vertices:
2776 if v.is_red():
2777 v.dist_to_red = 0
2778 else:
2779 v.dist_to_red = v.repl_info.cost
2781 if opts.verify or opts.dot_files:
2782 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
2783 for e in internal_edges]
2784 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2785 verify_properties = ('multi_edge_forest',)
2786 verify_and_dot('postkruskal', graph_edges, graph_nodes,
2787 label=label, properties=verify_properties,
2788 debug=DEBUG, verify=opts.verify,
2789 dot_files=opts.dot_files)
2791 # Ensure only one-way connections for partial-replicas,
2792 # and make sure they point the right way.
2793 edge_list = []
2794 for edge in output_edges:
2795 # We know these edges only have two endpoints because we made
2796 # them.
2797 v, w = edge.vertices
2798 if v.site is my_site or w.site is my_site:
2799 if (((v.is_black() or w.is_black()) and
2800 v.dist_to_red != MAX_DWORD)):
2801 edge.directed = True
2803 if w.dist_to_red < v.dist_to_red:
2804 edge.vertices[:] = w, v
2805 edge_list.append(edge)
2807 if opts.verify or opts.dot_files:
2808 graph_edges = [[x.site.site_dnstr for x in e.vertices]
2809 for e in edge_list]
2810 # Add the reverse edge if not directed.
2811 graph_edges.extend([x.site.site_dnstr
2812 for x in reversed(e.vertices)]
2813 for e in edge_list if not e.directed)
2814 graph_nodes = [x.site.site_dnstr for x in graph.vertices]
2815 verify_properties = ()
2816 verify_and_dot('post-one-way-partial', graph_edges, graph_nodes,
2817 label=label, properties=verify_properties,
2818 debug=DEBUG, verify=opts.verify,
2819 directed=True,
2820 dot_files=opts.dot_files)
2822 # count the components
2823 return edge_list, components
2826 def sort_replica_by_dsa_guid(rep1, rep2):
2827 """Helper to sort NCReplicas by their DSA guids
2829 The guids need to be sorted in their NDR form.
2831 :param rep1: An NC replica
2832 :param rep2: Another replica
2833 :return: -1, 0, or 1, indicating sort order.
2834 """
2835 return cmp(ndr_pack(rep1.rep_dsa_guid), ndr_pack(rep2.rep_dsa_guid))
2838 def sort_dsa_by_gc_and_guid(dsa1, dsa2):
2839 """Helper to sort DSAs by guid global catalog status
2841 GC DSAs come before non-GC DSAs, other than that, the guids are
2842 sorted in NDR form.
2844 :param dsa1: A DSA object
2845 :param dsa2: Another DSA
2846 :return: -1, 0, or 1, indicating sort order.
2847 """
2848 if dsa1.is_gc() and not dsa2.is_gc():
2849 return -1
2850 if not dsa1.is_gc() and dsa2.is_gc():
2851 return +1
2852 return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid))
2855 def is_smtp_replication_available():
2856 """Can the KCC use SMTP replication?
2858 Currently always returns false because Samba doesn't implement
2859 SMTP transfer for NC changes between DCs.
2861 :return: Boolean (always False)
2862 """
2863 return False
2866 def create_edge(con_type, site_link, guid_to_vertex):
2867 e = MultiEdge()
2868 e.site_link = site_link
2869 e.vertices = []
2870 for site_guid in site_link.site_list:
2871 if str(site_guid) in guid_to_vertex:
2872 e.vertices.extend(guid_to_vertex.get(str(site_guid)))
2873 e.repl_info.cost = site_link.cost
2874 e.repl_info.options = site_link.options
2875 e.repl_info.interval = site_link.interval
2876 e.repl_info.schedule = convert_schedule_to_repltimes(site_link.schedule)
2877 e.con_type = con_type
2878 e.directed = False
2879 return e
2882 def create_auto_edge_set(graph, transport):
2883 e_set = MultiEdgeSet()
2884 # use a NULL guid, not associated with a SiteLinkBridge object
2885 e_set.guid = misc.GUID()
2886 for site_link in graph.edges:
2887 if site_link.con_type == transport:
2888 e_set.edges.append(site_link)
2890 return e_set
2893 def create_edge_set(graph, transport, site_link_bridge):
2894 # TODO not implemented - need to store all site link bridges
2895 e_set = MultiEdgeSet()
2896 # e_set.guid = site_link_bridge
2897 return e_set
2900 def setup_vertices(graph):
2901 for v in graph.vertices:
2902 if v.is_white():
2903 v.repl_info.cost = MAX_DWORD
2904 v.root = None
2905 v.component_id = None
2906 else:
2907 v.repl_info.cost = 0
2908 v.root = v
2909 v.component_id = v
2911 v.repl_info.interval = 0
2912 v.repl_info.options = 0xFFFFFFFF
2913 v.repl_info.schedule = None # TODO highly suspicious
2914 v.demoted = False
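# Note on the colour scheme used by these helpers (as read from the code,
# not a spec quote): red vertices hold a full replica of the NC, black
# vertices a partial replica, and white vertices none at all; hence
# setup_vertices() gives white vertices an "infinite" cost (MAX_DWORD)
# and no root, while coloured vertices start at cost 0 as their own root
# and component.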
2917 def dijkstra(graph, edge_type, include_black):
2918 queue = []
2919 setup_dijkstra(graph, edge_type, include_black, queue)
2920 while len(queue) > 0:
2921 cost, guid, vertex = heapq.heappop(queue)
2922 for edge in vertex.edges:
2923 for v in edge.vertices:
2924 if v is not vertex:
2925 # add new path from vertex to v
2926 try_new_path(graph, queue, vertex, edge, v)
2929 def setup_dijkstra(graph, edge_type, include_black, queue):
2930 setup_vertices(graph)
2931 for vertex in graph.vertices:
2932 if vertex.is_white():
2933 continue
2935 if (((vertex.is_black() and not include_black)
2936 or edge_type not in vertex.accept_black
2937 or edge_type not in vertex.accept_red_red)):
2938 vertex.repl_info.cost = MAX_DWORD
2939 vertex.root = None # NULL GUID
2940 vertex.demoted = True # Demoted appears not to be used
2941 else:
2942 heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
2945 def try_new_path(graph, queue, vfrom, edge, vto):
2946 newRI = ReplInfo()
2947 # What this function checks is that there is a valid time frame for
2948 # which replication can actually occur, despite being adequately
2949 # connected
2950 intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI)
2952 # If the new path costs more than the current, then ignore the edge
2953 if newRI.cost > vto.repl_info.cost:
2954 return
2956 if newRI.cost < vto.repl_info.cost and not intersect:
2957 return
2959 new_duration = total_schedule(newRI.schedule)
2960 old_duration = total_schedule(vto.repl_info.schedule)
2962 # Cheaper or longer schedule
2963 if newRI.cost < vto.repl_info.cost or new_duration > old_duration:
2964 vto.root = vfrom.root
2965 vto.component_id = vfrom.component_id
2966 vto.repl_info = newRI
2967 heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
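# Worked example (illustrative numbers): if the path through vfrom would
# reach vto at cost 90 while vto currently sits at cost 100, the edge is
# taken and vto is re-queued, provided the schedules actually overlap
# (combine_repl_info() returned True).  If both costs are 100, the new
# path only wins when its combined schedule offers a longer total
# replication window than the one vto already has.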
2970 def check_demote_vertex(vertex, edge_type):
2971 if vertex.is_white():
2972 return
2974 # Accepts neither red-red nor black edges, demote
2975 if ((edge_type not in vertex.accept_black and
2976 edge_type not in vertex.accept_red_red)):
2977 vertex.repl_info.cost = MAX_DWORD
2978 vertex.root = None
2979 vertex.demoted = True # Demoted appears not to be used
2982 def undemote_vertex(vertex):
2983 if vertex.is_white():
2984 return
2986 vertex.repl_info.cost = 0
2987 vertex.root = vertex
2988 vertex.demoted = False
2991 def process_edge_set(graph, e_set, internal_edges):
2992 if e_set is None:
2993 for edge in graph.edges:
2994 for vertex in edge.vertices:
2995 check_demote_vertex(vertex, edge.con_type)
2996 process_edge(graph, edge, internal_edges)
2997 for vertex in edge.vertices:
2998 undemote_vertex(vertex)
2999 else:
3000 for edge in e_set.edges:
3001 process_edge(graph, edge, internal_edges)
3004 def process_edge(graph, examine, internal_edges):
3005 # Find the set of all vertices that the edge to examine touches
3006 vertices = []
3007 for v in examine.vertices:
3008 # Append a 4-tuple of color, repl cost, guid and vertex
3009 vertices.append((v.color, v.repl_info.cost, v.ndrpacked_guid, v))
3011 # Sort ascending by (color, cost, guid) so the best vertex comes first
3011 DEBUG("vertices is %s" % vertices)
3012 vertices.sort()
3014 color, cost, guid, bestv = vertices[0]
3015 # Add to internal edges an edge from every colored vertex to bestV
3016 for v in examine.vertices:
3017 if v.component_id is None or v.root is None:
3018 continue
3020 # Only add edge if valid inter-tree edge - needs a root and
3021 # different components
3022 if ((bestv.component_id is not None and
3023 bestv.root is not None and
3024 v.component_id is not None and
3025 v.root is not None and
3026 bestv.component_id != v.component_id)):
3027 add_int_edge(graph, internal_edges, examine, bestv, v)
3030 # Add an internal edge; the endpoints are the roots of the vertices
3031 # passed in and are always colored
3032 def add_int_edge(graph, internal_edges, examine, v1, v2):
3033 root1 = v1.root
3034 root2 = v2.root
3036 red_red = False
3037 if root1.is_red() and root2.is_red():
3038 red_red = True
3040 if red_red:
3041 if ((examine.con_type not in root1.accept_red_red
3042 or examine.con_type not in root2.accept_red_red)):
3043 return
3044 elif (examine.con_type not in root1.accept_black
3045 or examine.con_type not in root2.accept_black):
3046 return
3048 ri = ReplInfo()
3049 ri2 = ReplInfo()
3051 # Create the transitive replInfo for the two trees and this edge
3052 if not combine_repl_info(v1.repl_info, v2.repl_info, ri):
3053 return
3054 # ri is now initialized
3055 if not combine_repl_info(ri, examine.repl_info, ri2):
3056 return
3058 newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type,
3059 examine.site_link)
3060 # Order by vertex guid
3061 #XXX guid comparison using ndr_pack
3062 if newIntEdge.v1.ndrpacked_guid > newIntEdge.v2.ndrpacked_guid:
3063 newIntEdge.v1 = root2
3064 newIntEdge.v2 = root1
3066 internal_edges.add(newIntEdge)
3069 def kruskal(graph, edges):
3070 for v in graph.vertices:
3071 v.edges = []
3073 components = set([x for x in graph.vertices if not x.is_white()])
3074 edges = list(edges)
3076 # Sorted based on internal comparison function of internal edge
3077 edges.sort()
3079 #XXX expected_num_tree_edges is never used
3080 expected_num_tree_edges = 0 # TODO this value makes little sense
3082 count_edges = 0
3083 output_edges = []
3084 index = 0
3085 while index < len(edges): # TODO and num_components > 1
3086 e = edges[index]
3087 parent1 = find_component(e.v1)
3088 parent2 = find_component(e.v2)
3089 if parent1 is not parent2:
3090 count_edges += 1
3091 add_out_edge(graph, output_edges, e)
3092 parent1.component_id = parent2
3093 components.discard(parent1)
3095 index += 1
3097 return output_edges, len(components)
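# Sketch of the bookkeeping above (illustrative): every accepted edge
# merges two components, so starting from e.g. 4 coloured vertices and
# accepting 3 edges leaves len(components) == 1, i.e. a single spanning
# tree covers all the sites that needed connecting.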
3100 def find_component(vertex):
3101 if vertex.component_id is vertex:
3102 return vertex
3104 current = vertex
3105 while current.component_id is not current:
3106 current = current.component_id
3108 root = current
3109 current = vertex
3110 while current.component_id is not root:
3111 n = current.component_id
3112 current.component_id = root
3113 current = n
3115 return root
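# Note: find_component() is the usual union-find "find" with path
# compression; e.g. for a chain a -> b -> c (where c is its own
# component_id) a lookup on a returns c and repoints a directly at c,
# keeping later lookups cheap.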
3118 def add_out_edge(graph, output_edges, e):
3119 v1 = e.v1
3120 v2 = e.v2
3122 # This multi-edge is a 'real' edge with no GUID
3123 ee = MultiEdge()
3124 ee.directed = False
3125 ee.site_link = e.site_link
3126 ee.vertices.append(v1)
3127 ee.vertices.append(v2)
3128 ee.con_type = e.e_type
3129 ee.repl_info = e.repl_info
3130 output_edges.append(ee)
3132 v1.edges.append(ee)
3133 v2.edges.append(ee)
3136 def test_all_reps_from(lp, creds, rng_seed=None):
3137 kcc = KCC()
3138 kcc.load_samdb(opts.dburl, lp, creds)
3139 dsas = kcc.list_dsas()
3140 needed_parts = {}
3141 current_parts = {}
3143 guid_to_dnstr = {}
3144 for site in kcc.site_table.values():
3145 guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
3146 for dnstr, dsa in site.dsa_table.items())
3148 dot_edges = []
3149 dot_vertices = []
3150 colours = []
3151 vertex_colours = []
3153 for dsa_dn in dsas:
3154 if rng_seed:
3155 random.seed(rng_seed)
3156 kcc = KCC()
3157 kcc.run(opts.dburl, lp, creds, forced_local_dsa=dsa_dn,
3158 forget_local_links=opts.forget_local_links,
3159 forget_intersite_links=opts.forget_intersite_links)
3161 current, needed = kcc.my_dsa.get_rep_tables()
3163 for dsa in kcc.my_site.dsa_table.values():
3164 if dsa is kcc.my_dsa:
3165 continue
3166 kcc.translate_ntdsconn(dsa)
3167 c, n = dsa.get_rep_tables()
3168 current.update(c)
3169 needed.update(n)
3171 for name, rep_table, rep_parts in (
3172 ('needed', needed, needed_parts),
3173 ('current', current, current_parts)):
3174 for part, nc_rep in rep_table.items():
3175 edges = rep_parts.setdefault(part, [])
3176 for reps_from in nc_rep.rep_repsFrom:
3177 source = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
3178 dest = guid_to_dnstr[str(nc_rep.rep_dsa_guid)]
3179 edges.append((source, dest))
3181 for site in kcc.site_table.values():
3182 for dsa in site.dsa_table.values():
3183 if dsa.is_ro():
3184 vertex_colours.append('#cc0000')
3185 else:
3186 vertex_colours.append('#0000cc')
3187 dot_vertices.append(dsa.dsa_dnstr)
3188 if dsa.connect_table:
3189 DEBUG_FN("DSA %s %s connections:\n%s" %
3190 (dsa.dsa_dnstr, len(dsa.connect_table),
3191 [x.from_dnstr for x in
3192 dsa.connect_table.values()]))
3193 for con in dsa.connect_table.values():
3194 if con.is_rodc_topology():
3195 colours.append('red')
3196 else:
3197 colours.append('blue')
3198 dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))
3200 verify_and_dot('all-dsa-connections', dot_edges, vertices=dot_vertices,
3201 label="all dsa NTDSConnections", properties=(),
3202 debug=DEBUG, verify=opts.verify, dot_files=opts.dot_files,
3203 directed=True, edge_colors=colours,
3204 vertex_colors=vertex_colours)
3206 for name, rep_parts in (('needed', needed_parts),
3207 ('current', current_parts)):
3208 for part, edges in rep_parts.items():
3209 verify_and_dot('all-repsFrom_%s__%s' % (name, part), edges,
3210 directed=True, label=part,
3211 properties=(), debug=DEBUG, verify=opts.verify,
3212 dot_files=opts.dot_files)
3215 logger = logging.getLogger("samba_kcc")
3216 logger.addHandler(logging.StreamHandler(sys.stdout))
3217 DEBUG = logger.debug
3220 def _color_debug(*args, **kwargs):
3221 DEBUG('%s%s%s' % (kwargs['color'], args[0], C_NORMAL), *args[1:])
3223 _globals = globals()
3224 for _color in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW',
3225 'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA',
3226 'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'):
3227 _globals['DEBUG_' + _color] = partial(_color_debug, color=_globals[_color])
3230 def DEBUG_FN(msg=''):
3231 import traceback
3232 filename, lineno, function, text = traceback.extract_stack(None, 2)[0]
3233 DEBUG("%s%s:%s%s %s%s()%s '%s'" % (CYAN, filename, BLUE, lineno,
3234 CYAN, function, C_NORMAL, msg))
3237 ##################################################
3238 # samba_kcc entry point
3239 ##################################################
3241 parser = optparse.OptionParser("samba_kcc [options]")
3242 sambaopts = options.SambaOptions(parser)
3243 credopts = options.CredentialsOptions(parser)
3245 parser.add_option_group(sambaopts)
3246 parser.add_option_group(credopts)
3247 parser.add_option_group(options.VersionOptions(parser))
3249 parser.add_option("--readonly", default=False,
3250 help="compute topology but do not update database",
3251 action="store_true")
3253 parser.add_option("--debug",
3254 help="debug output",
3255 action="store_true")
3257 parser.add_option("--verify",
3258 help="verify that assorted invariants are kept",
3259 action="store_true")
3261 parser.add_option("--list-verify-tests",
3262 help=("list what verification actions are available "
3263 "and do nothing else"),
3264 action="store_true")
3266 parser.add_option("--no-dot-files", dest='dot_files',
3267 help="Don't write dot graph files in /tmp",
3268 default=True, action="store_false")
3270 parser.add_option("--seed",
3271 help="random number seed",
3272 type=int)
3274 parser.add_option("--importldif",
3275 help="import topology ldif file",
3276 type=str, metavar="<file>")
3278 parser.add_option("--exportldif",
3279 help="export topology ldif file",
3280 type=str, metavar="<file>")
3282 parser.add_option("-H", "--URL",
3283 help="LDB URL for database or target server",
3284 type=str, metavar="<URL>", dest="dburl")
3286 parser.add_option("--tmpdb",
3287 help="schemaless database file to create for ldif import",
3288 type=str, metavar="<file>")
3290 parser.add_option("--now",
3291 help=("assume current time is this ('YYYYmmddHHMMSS[tz]',"
3292 " default: system time)"),
3293 type=str, metavar="<date>")
3295 parser.add_option("--forced-local-dsa",
3296 help="run calculations assuming the DSA is this DN",
3297 type=str, metavar="<DSA>")
3299 parser.add_option("--attempt-live-connections", default=False,
3300 help="Attempt to connect to other DSAs to test links",
3301 action="store_true")
3303 parser.add_option("--list-valid-dsas", default=False,
3304 help=("Print a list of DSA dnstrs that could be"
3305 " used in --forced-local-dsa"),
3306 action="store_true")
3308 parser.add_option("--test-all-reps-from", default=False,
3309 help="Create and verify a graph of reps-from for every DSA",
3310 action="store_true")
3312 parser.add_option("--forget-local-links", default=False,
3313 help="pretend not to know the existing local topology",
3314 action="store_true")
3316 parser.add_option("--forget-intersite-links", default=False,
3317 help="pretend not to know the existing intersite topology",
3318 action="store_true")
3321 opts, args = parser.parse_args()
3324 if opts.list_verify_tests:
3325 list_verify_tests()
3326 sys.exit(0)
3328 if opts.debug:
3329 logger.setLevel(logging.DEBUG)
3330 elif opts.readonly:
3331 logger.setLevel(logging.INFO)
3332 else:
3333 logger.setLevel(logging.WARNING)
3335 # initialize seed from optional input parameter
3336 if opts.seed:
3337 random.seed(opts.seed)
3338 else:
3339 random.seed(0xACE5CA11)
3341 if opts.now:
3342 for timeformat in ("%Y%m%d%H%M%S%Z", "%Y%m%d%H%M%S"):
3343 try:
3344 now_tuple = time.strptime(opts.now, timeformat)
3345 break
3346 except ValueError:
3347 pass
3348 else:
3349 # else happens if break doesn't --> no match
3350 print >> sys.stderr, "could not parse time '%s'" % opts.now
3351 sys.exit(1)
3353 unix_now = int(time.mktime(now_tuple))
3354 else:
3355 unix_now = int(time.time())
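# Example (illustrative): "--now 20150101120000GMT" is intended to match
# the first format above and "--now 20150101120000" the second; a value
# matching neither makes the script exit with "could not parse time".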
3357 nt_now = unix2nttime(unix_now)
3359 lp = sambaopts.get_loadparm()
3360 creds = credopts.get_credentials(lp, fallback_machine=True)
3362 if opts.dburl is None:
3363 opts.dburl = lp.samdb_url()
3365 if opts.test_all_reps_from:
3366 opts.readonly = True
3367 rng_seed = opts.seed or 0xACE5CA11
3368 test_all_reps_from(lp, creds, rng_seed=rng_seed)
3369 sys.exit()
3371 # Instantiate Knowledge Consistency Checker and perform run
3372 kcc = KCC()
3374 if opts.exportldif:
3375 rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)
3376 sys.exit(rc)
3378 if opts.importldif:
3379 if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
3380 logger.error("Specify a target temp database file with --tmpdb option")
3381 sys.exit(1)
3383 rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)
3384 if rc != 0:
3385 sys.exit(rc)
3387 if opts.list_valid_dsas:
3388 kcc.load_samdb(opts.dburl, lp, creds)
3389 print '\n'.join(kcc.list_dsas())
3390 sys.exit()
3392 try:
3393 rc = kcc.run(opts.dburl, lp, creds, opts.forced_local_dsa,
3394 opts.forget_local_links, opts.forget_intersite_links)
3395 sys.exit(rc)
3397 except GraphError, e:
3398 print e
3399 sys.exit(1)