KCC: --test-all-reps-from uses same random seed for all DSAs
[Samba.git] / source4 / scripting / bin / samba_kcc
1 #!/usr/bin/env python
3 # Compute our KCC topology
5 # Copyright (C) Dave Craft 2011
6 # Copyright (C) Andrew Bartlett 2015
8 # Andrew Bartlett's alleged work performed by his underlings Douglas
9 # Bagnall and Garming Sam.
11 # This program is free software; you can redistribute it and/or modify
12 # it under the terms of the GNU General Public License as published by
13 # the Free Software Foundation; either version 3 of the License, or
14 # (at your option) any later version.
16 # This program is distributed in the hope that it will be useful,
17 # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 # GNU General Public License for more details.
21 # You should have received a copy of the GNU General Public License
22 # along with this program. If not, see <http://www.gnu.org/licenses/>.
24 import os
25 import sys
26 import random
27 import uuid
29 # ensure we get messages out immediately, so they get in the samba logs,
30 # and don't get swallowed by a timeout
31 os.environ['PYTHONUNBUFFERED'] = '1'
33 # forcing GMT avoids a problem in some timezones with kerberos. Both MIT
34 # and heimdal can get mutual authentication errors due to the 24 second difference
35 # between UTC and GMT when using some zone files (eg. the PDT zone from
36 # the US)
37 os.environ["TZ"] = "GMT"
39 # Find right directory when running from source tree
40 sys.path.insert(0, "bin/python")
42 import optparse
43 import logging
44 import itertools
45 import heapq
46 import time
47 from functools import partial
49 from samba import (
50 getopt as options,
51 Ldb,
52 ldb,
53 dsdb,
54 read_and_sub_file,
55 drs_utils,
56 nttime2unix)
57 from samba.auth import system_session
58 from samba.samdb import SamDB
59 from samba.dcerpc import drsuapi
60 from samba.kcc_utils import *
61 from samba.graph_utils import *
62 from samba import ldif_utils
65 class KCC(object):
66 """The Knowledge Consistency Checker class.
68 A container for objects and methods allowing a run of the KCC. Produces a
69 set of connections in the samdb which the Distributed Replication
70 Service can then use to replicate naming contexts.
71 """
72 def __init__(self):
73 """Initializes the partitions class which can hold
74 our local DC's partitions or all the partitions in
75 the forest
76 """
77 self.part_table = {} # partition objects
78 self.site_table = {}
79 self.transport_table = {}
80 self.ip_transport = None
81 self.sitelink_table = {}
82 self.dsa_by_dnstr = {}
83 self.dsa_by_guid = {}
85 self.get_dsa_by_guidstr = self.dsa_by_guid.get
86 self.get_dsa = self.dsa_by_dnstr.get
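# These are plain dict.get lookups, so for example (illustrative; the DN
# below is hypothetical)
#   self.get_dsa("CN=NTDS Settings,CN=DC1,CN=Servers,CN=Site-A,CN=Sites,...")
# returns the loaded DSA object for that nTDSDSA DN, or None if unknown.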
88 # TODO: These should be backed by a 'permanent' store so that when
89 # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
90 # the failure information can be returned
91 self.kcc_failed_links = {}
92 self.kcc_failed_connections = set()
94 # Used in inter-site topology computation. A list
95 # of connections (by NTDSConnection object) that are
96 # to be kept when pruning un-needed NTDS Connections
97 self.kept_connections = set()
99 self.my_dsa_dnstr = None # My dsa DN
100 self.my_dsa = None # My dsa object
102 self.my_site_dnstr = None
103 self.my_site = None
105 self.samdb = None
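# A rough usage sketch of the loading phase (illustrative only; the real
# driver, not shown in this excerpt, is authoritative and also sets up the
# globals such as unix_now, nt_now and opts that these methods rely on):
#
#   kcc = KCC()
#   kcc.samdb = SamDB(url=dburl, session_info=system_session(),
#                     credentials=creds, lp=lp)  # hypothetical wiring
#   kcc.load_all_transports()
#   kcc.load_all_sitelinks()
#   kcc.load_my_site()
#   kcc.load_all_sites()
#   kcc.load_my_dsa()
#   kcc.load_all_partitions()
#   # ...then intra-site / inter-site topology generation and pruning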
107 def load_all_transports(self):
108 """Loads the inter-site transport objects for Sites
110 ::returns: Raises KCCError on error
111 """
112 try:
113 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
114 self.samdb.get_config_basedn(),
115 scope=ldb.SCOPE_SUBTREE,
116 expression="(objectClass=interSiteTransport)")
117 except ldb.LdbError, (enum, estr):
118 raise KCCError("Unable to find inter-site transports - (%s)" %
119 estr)
121 for msg in res:
122 dnstr = str(msg.dn)
124 transport = Transport(dnstr)
126 transport.load_transport(self.samdb)
127 self.transport_table.setdefault(str(transport.guid),
128 transport)
129 if transport.name == 'IP':
130 self.ip_transport = transport
132 if self.ip_transport is None:
133 raise KCCError("there doesn't seem to be an IP transport")
135 def load_all_sitelinks(self):
136 """Loads the inter-site siteLink objects
138 ::returns: Raises KCCError on error
139 """
140 try:
141 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
142 self.samdb.get_config_basedn(),
143 scope=ldb.SCOPE_SUBTREE,
144 expression="(objectClass=siteLink)")
145 except ldb.LdbError, (enum, estr):
146 raise KCCError("Unable to find inter-site siteLinks - (%s)" % estr)
148 for msg in res:
149 dnstr = str(msg.dn)
151 # already loaded
152 if dnstr in self.sitelink_table:
153 continue
155 sitelink = SiteLink(dnstr)
157 sitelink.load_sitelink(self.samdb)
159 # Assign this siteLink to table
160 # and index by dn
161 self.sitelink_table[dnstr] = sitelink
163 def load_site(self, dn_str):
164 """Helper for load_my_site and load_all_sites. It puts all the site's
165 DSAs into the KCC indices.
166 """
167 site = Site(dn_str, unix_now)
168 site.load_site(self.samdb)
170 # I am not sure why, but we avoid replacing the site with an
171 # identical copy.
172 guid = str(site.site_guid)
173 if guid not in self.site_table:
174 self.site_table[guid] = site
175 self.dsa_by_dnstr.update(site.dsa_table)
176 self.dsa_by_guid.update((str(x.dsa_guid), x)
177 for x in site.dsa_table.values())
179 return self.site_table[guid]
181 def load_my_site(self):
182 """Loads the Site class for the local DSA
184 ::returns: Raises an Exception on error
185 """
186 self.my_site_dnstr = ("CN=%s,CN=Sites,%s" % (
187 self.samdb.server_site_name(),
188 self.samdb.get_config_basedn()))
190 self.my_site = self.load_site(self.my_site_dnstr)
192 def load_all_sites(self):
193 """Discover all sites and instantiate and load each
194 NTDS Site settings.
196 ::returns: Raises KCCError on error
197 """
198 try:
199 res = self.samdb.search("CN=Sites,%s" %
200 self.samdb.get_config_basedn(),
201 scope=ldb.SCOPE_SUBTREE,
202 expression="(objectClass=site)")
203 except ldb.LdbError, (enum, estr):
204 raise KCCError("Unable to find sites - (%s)" % estr)
206 for msg in res:
207 sitestr = str(msg.dn)
208 self.load_site(sitestr)
210 def load_my_dsa(self):
211 """Discover my nTDSDSA dn thru the rootDSE entry
213 ::returns: Raises KCCError on error.
214 """
215 dn = ldb.Dn(self.samdb, "<GUID=%s>" % self.samdb.get_ntds_GUID())
216 try:
217 res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
218 attrs=["objectGUID"])
219 except ldb.LdbError, (enum, estr):
220 logger.warning("Search for %s failed: %s. This typically happens"
221 " in --importldif mode due to lack of module"
222 " support.", dn, estr)
223 try:
224 # We work around the failure above by looking at the
225 # dsServiceName that was put in the fake rootdse by
226 # the --exportldif, rather than the
227 # samdb.get_ntds_GUID(). The disadvantage is that this
228 # mode requires we modify the @ROOTDSE dn to support
229 # --forced-local-dsa
230 service_name_res = self.samdb.search(base="",
231 scope=ldb.SCOPE_BASE,
232 attrs=["dsServiceName"])
233 dn = ldb.Dn(self.samdb,
234 service_name_res[0]["dsServiceName"][0])
236 res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
237 attrs=["objectGUID"])
238 except ldb.LdbError, (enum, estr):
239 raise KCCError("Unable to find my nTDSDSA - (%s)" % estr)
241 if len(res) != 1:
242 raise KCCError("Unable to find my nTDSDSA at %s" %
243 dn.extended_str())
245 ntds_guid = misc.GUID(self.samdb.get_ntds_GUID())
246 if misc.GUID(res[0]["objectGUID"][0]) != ntds_guid:
247 raise KCCError("Did not find the GUID we expected,"
248 " perhaps due to --importldif")
250 self.my_dsa_dnstr = str(res[0].dn)
252 self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)
254 if self.my_dsa_dnstr not in self.dsa_by_dnstr:
255 DEBUG_DARK_YELLOW("my_dsa %s isn't in self.dsa_by_dnstr:"
256 " it must be RODC.\n"
257 "Let's add it, because my_dsa is special!\n"
258 "(likewise for self.dsa_by_guid of course)" %
259 self.my_dsa_dnstr)
261 self.dsa_by_dnstr[self.my_dsa_dnstr] = self.my_dsa
262 self.dsa_by_guid[str(self.my_dsa.dsa_guid)] = self.my_dsa
264 def load_all_partitions(self):
265 """Discover all NCs thru the Partitions dn and
266 instantiate and load the NCs.
268 Each NC is inserted into the part_table by partition
269 dn string (not the nCName dn string)
271 ::returns: Raises KCCError on error
272 """
273 try:
274 res = self.samdb.search("CN=Partitions,%s" %
275 self.samdb.get_config_basedn(),
276 scope=ldb.SCOPE_SUBTREE,
277 expression="(objectClass=crossRef)")
278 except ldb.LdbError, (enum, estr):
279 raise KCCError("Unable to find partitions - (%s)" % estr)
281 for msg in res:
282 partstr = str(msg.dn)
284 # already loaded
285 if partstr in self.part_table:
286 continue
288 part = Partition(partstr)
290 part.load_partition(self.samdb)
291 self.part_table[partstr] = part
293 def should_be_present_test(self):
294 """Enumerate all loaded partitions and DSAs in local
295 site and test if NC should be present as replica
296 """
297 for partdn, part in self.part_table.items():
298 for dsadn, dsa in self.my_site.dsa_table.items():
299 needed, ro, partial = part.should_be_present(dsa)
300 logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
301 (dsadn, part.nc_dnstr, needed, ro, partial))
303 def refresh_failed_links_connections(self):
304 """Based on MS-ADTS 6.2.2.1"""
306 # Instead of NULL link with failure_count = 0, the tuple is
307 # simply removed
309 # LINKS: Refresh failed links
310 self.kcc_failed_links = {}
311 current, needed = self.my_dsa.get_rep_tables()
312 for replica in current.values():
313 # For every possible connection to replicate
314 for reps_from in replica.rep_repsFrom:
315 failure_count = reps_from.consecutive_sync_failures
316 if failure_count <= 0:
317 continue
319 dsa_guid = str(reps_from.source_dsa_obj_guid)
320 time_first_failure = reps_from.last_success
321 last_result = reps_from.last_attempt
322 dns_name = reps_from.dns_name1
324 f = self.kcc_failed_links.get(dsa_guid)
325 if not f:
326 f = KCCFailedObject(dsa_guid, failure_count,
327 time_first_failure, last_result,
328 dns_name)
329 self.kcc_failed_links[dsa_guid] = f
330 #elif f.failure_count == 0:
331 # f.failure_count = failure_count
332 # f.time_first_failure = time_first_failure
333 # f.last_result = last_result
334 else:
335 f.failure_count = max(f.failure_count, failure_count)
336 f.time_first_failure = min(f.time_first_failure,
337 time_first_failure)
338 f.last_result = last_result
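# Illustrative merge (hypothetical numbers): two repsFrom entries for the
# same source DSA with 3 and 5 consecutive failures collapse into one
# KCCFailedObject with failure_count 5 and the earlier time_first_failure.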
340 # CONNECTIONS: Refresh failed connections
341 restore_connections = set()
342 if opts.attempt_live_connections:
343 DEBUG("refresh_failed_links: checking if links are still down")
344 for connection in self.kcc_failed_connections:
345 try:
346 drs_utils.drsuapi_connect(connection.dns_name, lp, creds)
347 # Failed connection is no longer failing
348 restore_connections.add(connection)
349 except drs_utils.drsException:
350 # Failed connection still failing
351 connection.failure_count += 1
352 else:
353 DEBUG("refresh_failed_links: not checking live links because we\n"
354 "weren't asked to --attempt-live-connections")
356 # Remove the restored connections from the failed connections
357 self.kcc_failed_connections.difference_update(restore_connections)
359 def is_stale_link_connection(self, target_dsa):
360 """Returns False if no tuple z exists in the kCCFailedLinks or
361 kCCFailedConnections variables such that z.UUIDDsa is the
362 objectGUID of the target dsa, z.FailureCount > 0, and
363 the current time - z.TimeFirstFailure > 2 hours.
364 """
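# Worked example (hypothetical times): with failure_count > 0, a first
# failure recorded 3 hours ago exceeds the 2 hour window
# (60 * 60 * 2 = 7200 seconds) and makes the link stale; a first failure
# 30 minutes ago does not.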
365 # Returns True if tuple z exists...
366 failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
367 if failed_link:
368 # failure_count should be > 0, but check anyways
369 if failed_link.failure_count > 0:
370 unix_first_failure = \
371 nttime2unix(failed_link.time_first_failure)
372 # TODO guard against future
373 if unix_first_failure > unix_now:
374 logger.error("The last success time attribute for "
375 "repsFrom is in the future!")
377 # Perform calculation in seconds
378 if (unix_now - unix_first_failure) > 60 * 60 * 2:
379 return True
381 # TODO connections
383 return False
385 # TODO: This should be backed by some form of local database
386 def remove_unneeded_failed_links_connections(self):
387 # Remove all tuples in kcc_failed_links where failure count = 0
388 # In this implementation, this should never happen.
390 # Remove all connections which were not used this run or connections
391 # that became active during this run.
392 pass
394 def remove_unneeded_ntdsconn(self, all_connected):
395 """Removes unneeded NTDS Connections after computation
396 of KCC intra and inter-site topology has finished.
397 """
398 mydsa = self.my_dsa
400 # Loop thru connections
401 for cn_conn in mydsa.connect_table.values():
402 if cn_conn.guid is None:
403 if opts.readonly:
404 cn_conn.guid = misc.GUID(str(uuid.uuid4()))
405 cn_conn.whenCreated = nt_now
406 else:
407 cn_conn.load_connection(self.samdb)
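# A connection without a GUID here is presumably one this KCC run has
# proposed but not yet committed.  In --readonly mode nothing will be
# written to the database, so a placeholder GUID and timestamp are
# invented above purely so that the duplicate-detection comparisons in
# the loop below have something to work with.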
409 for cn_conn in mydsa.connect_table.values():
411 s_dnstr = cn_conn.get_from_dnstr()
412 if s_dnstr is None:
413 cn_conn.to_be_deleted = True
414 continue
416 # Get the source DSA no matter what site
417 s_dsa = self.get_dsa(s_dnstr)
419 #XXX should an RODC be regarded as same site
420 same_site = s_dnstr in self.my_site.dsa_table
422 # Given an nTDSConnection object cn, if the DC with the
423 # nTDSDSA object dc that is the parent object of cn and
424 # the DC with the nTDSDSA object referenced by cn!fromServer
425 # are in the same site, the KCC on dc deletes cn if all of
426 # the following are true:
428 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
430 # No site settings object s exists for the local DC's site, or
431 # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
432 # s!options.
434 # Another nTDSConnection object cn2 exists such that cn and
435 # cn2 have the same parent object, cn!fromServer = cn2!fromServer,
436 # and either
438 # cn!whenCreated < cn2!whenCreated
440 # cn!whenCreated = cn2!whenCreated and
441 # cn!objectGUID < cn2!objectGUID
443 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
444 if same_site:
445 if not cn_conn.is_generated():
446 continue
448 if self.my_site.is_cleanup_ntdsconn_disabled():
449 continue
451 # Loop thru connections looking for a duplicate that
452 # fulfills the previous criteria
453 lesser = False
454 packed_guid = ndr_pack(cn_conn.guid)
455 for cn2_conn in mydsa.connect_table.values():
456 if cn2_conn is cn_conn:
457 continue
459 s2_dnstr = cn2_conn.get_from_dnstr()
461 # If the NTDS Connection has a different
462 # fromServer field then no match
463 if s2_dnstr != s_dnstr:
464 continue
466 #XXX GUID comparison
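# ndr_pack() serialises each GUID into its NDR byte form, so comparing the
# packed blobs gives a stable, deterministic ordering (an approximation of
# the objectGUID comparison that the spec text above calls for).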
467 lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or
468 (cn_conn.whenCreated == cn2_conn.whenCreated and
469 packed_guid < ndr_pack(cn2_conn.guid)))
471 if lesser:
472 break
474 if lesser and not cn_conn.is_rodc_topology():
475 cn_conn.to_be_deleted = True
477 # Given an nTDSConnection object cn, if the DC with the nTDSDSA
478 # object dc that is the parent object of cn and the DC with
479 # the nTDSDSA object referenced by cn!fromServer are in
480 # different sites, a KCC acting as an ISTG in dc's site
481 # deletes cn if all of the following are true:
483 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
485 # cn!fromServer references an nTDSDSA object for a DC
486 # in a site other than the local DC's site.
488 # The keepConnections sequence returned by
489 # CreateIntersiteConnections() does not contain
490 # cn!objectGUID, or cn is "superseded by" (see below)
491 # another nTDSConnection cn2 and keepConnections
492 # contains cn2!objectGUID.
494 # The return value of CreateIntersiteConnections()
495 # was true.
497 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
498 # cn!options
500 else: # different site
502 if not mydsa.is_istg():
503 continue
505 if not cn_conn.is_generated():
506 continue
508 # TODO
509 # We are directly using this connection in intersite or
510 # we are using a connection which can supersede this one.
512 # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
513 # appear to be correct.
515 # 1. cn!fromServer and cn!parent appear inconsistent with
516 # no cn2
517 # 2. The repsFrom do not imply each other
519 if cn_conn in self.kept_connections: # and not_superceded:
520 continue
522 # This is the result of create_intersite_connections
523 if not all_connected:
524 continue
526 if not cn_conn.is_rodc_topology():
527 cn_conn.to_be_deleted = True
529 if mydsa.is_ro() or opts.readonly:
530 for connect in mydsa.connect_table.values():
531 if connect.to_be_deleted:
532 DEBUG_FN("TO BE DELETED:\n%s" % connect)
533 if connect.to_be_added:
534 DEBUG_FN("TO BE ADDED:\n%s" % connect)
536 # Perform deletion from our tables but perform
537 # no database modification
538 mydsa.commit_connections(self.samdb, ro=True)
539 else:
540 # Commit any modified connections
541 mydsa.commit_connections(self.samdb)
543 def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
544 """Part of MS-ADTS 6.2.2.5.
546 Update t_repsFrom if necessary to satisfy requirements. Such
547 updates are typically required when the IDL_DRSGetNCChanges
548 server has moved from one site to another--for example, to
549 enable compression when the server is moved from the
550 client's site to another site.
552 :param n_rep: NC replica we need
553 :param t_repsFrom: repsFrom tuple to modify
554 :param s_rep: NC replica at source DSA
555 :param s_dsa: source DSA
556 :param cn_conn: Local DSA NTDSConnection child
558 ::returns: (update) bit field containing which portion of the
559 repsFrom was modified. This bit field is suitable as input
560 to IDL_DRSReplicaModify ulModifyFields element, as it consists
561 of these bits:
562 drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
563 drsuapi.DRSUAPI_DRS_UPDATE_FLAGS
564 drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS
565 """
566 s_dnstr = s_dsa.dsa_dnstr
567 update = 0x0
569 same_site = s_dnstr in self.my_site.dsa_table
571 # if schedule doesn't match then update and modify
572 times = convert_schedule_to_repltimes(cn_conn.schedule)
573 if times != t_repsFrom.schedule:
574 t_repsFrom.schedule = times
575 update |= drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
577 # Bit DRS_PER_SYNC is set in replicaFlags if and only
578 # if nTDSConnection schedule has a value v that specifies
579 # scheduled replication is to be performed at least once
580 # per week.
581 if cn_conn.is_schedule_minimum_once_per_week():
583 if ((t_repsFrom.replica_flags &
584 drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0):
585 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC
587 # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
588 # if the source DSA and the local DC's nTDSDSA object are
589 # in the same site or source dsa is the FSMO role owner
590 # of one or more FSMO roles in the NC replica.
591 if same_site or n_rep.is_fsmo_role_owner(s_dnstr):
593 if ((t_repsFrom.replica_flags &
594 drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0):
595 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC
597 # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
598 # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
599 # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
600 # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
601 # t.replicaFlags if and only if s and the local DC's
602 # nTDSDSA object are in different sites.
603 if ((cn_conn.options &
604 dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0):
606 if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:
607 # XXX WARNING
609 # it LOOKS as if this next test is a bit silly: it
610 # checks the flag then sets it if it is not set; the same
611 # effect could be achieved by unconditionally setting
612 # it. But in fact the repsFrom object has special
613 # magic attached to it, and altering replica_flags has
614 # side-effects. That is bad in my opinion, but there
615 # you go.
616 if ((t_repsFrom.replica_flags &
617 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
618 t_repsFrom.replica_flags |= \
619 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
621 elif not same_site:
623 if ((t_repsFrom.replica_flags &
624 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
625 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
627 # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
628 # and only if s and the local DC's nTDSDSA object are
629 # not in the same site and the
630 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
631 # clear in cn!options
632 if (not same_site and
633 (cn_conn.options &
634 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):
636 if ((t_repsFrom.replica_flags &
637 drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0):
638 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION
640 # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
641 # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
642 if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:
644 if ((t_repsFrom.replica_flags &
645 drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0):
646 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC
648 # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
649 # set in t.replicaFlags if and only if cn!enabledConnection = false.
650 if not cn_conn.is_enabled():
652 if ((t_repsFrom.replica_flags &
653 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0):
654 t_repsFrom.replica_flags |= \
655 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC
657 if ((t_repsFrom.replica_flags &
658 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0):
659 t_repsFrom.replica_flags |= \
660 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC
662 # If s and the local DC's nTDSDSA object are in the same site,
663 # cn!transportType has no value, or the RDN of cn!transportType
664 # is CN=IP:
666 # Bit DRS_MAIL_REP in t.replicaFlags is clear.
668 # t.uuidTransport = NULL GUID.
670 # t.uuidDsa = The GUID-based DNS name of s.
672 # Otherwise:
674 # Bit DRS_MAIL_REP in t.replicaFlags is set.
676 # If x is the object with dsname cn!transportType,
677 # t.uuidTransport = x!objectGUID.
679 # Let a be the attribute identified by
680 # x!transportAddressAttribute. If a is
681 # the dNSHostName attribute, t.uuidDsa = the GUID-based
682 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
684 # It appears that the first statement i.e.
686 # "If s and the local DC's nTDSDSA object are in the same
687 # site, cn!transportType has no value, or the RDN of
688 # cn!transportType is CN=IP:"
690 # could be a slightly tighter statement if it had an "or"
691 # between each condition. I believe this should
692 # be interpreted as:
694 # IF (same-site) OR (no-value) OR (type-ip)
696 # because IP should be the primary transport mechanism
697 # (even in inter-site) and the absence of the transportType
698 # attribute should always imply IP no matter if it's multi-site
700 # NOTE MS-TECH INCORRECT:
702 # All indications point to these statements above being
703 # incorrectly stated:
705 # t.uuidDsa = The GUID-based DNS name of s.
707 # Let a be the attribute identified by
708 # x!transportAddressAttribute. If a is
709 # the dNSHostName attribute, t.uuidDsa = the GUID-based
710 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
712 # because the uuidDSA is a GUID and not a GUID-based DNS
713 # name. Nor can uuidDsa hold (s!parent)!a if not
714 # dNSHostName. What should have been said is:
716 # t.naDsa = The GUID-based DNS name of s
718 # That would also be correct if transportAddressAttribute
719 # were "mailAddress" because (naDsa) can also correctly
720 # hold the SMTP ISM service address.
722 nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())
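# nastr is the GUID-based DNS name of the source DSA, e.g. (hypothetical
# GUID and forest) "a63ba2a5-6ac1-4b0e-9f57-5f57c4f2a0a1._msdcs.example.com"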
724 # We're not currently supporting SMTP replication
725 # so is_smtp_replication_available() is currently
726 # always returning False
727 if ((same_site or
728 cn_conn.transport_dnstr is None or
729 cn_conn.transport_dnstr.find("CN=IP") == 0 or
730 not is_smtp_replication_available())):
732 if ((t_repsFrom.replica_flags &
733 drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0):
734 t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP
736 t_repsFrom.transport_guid = misc.GUID()
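# misc.GUID() with no argument constructs the all-zero GUID, matching the
# "t.uuidTransport = NULL GUID" requirement quoted above.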
738 # See (NOTE MS-TECH INCORRECT) above
739 if t_repsFrom.version == 0x1:
740 if t_repsFrom.dns_name1 is None or \
741 t_repsFrom.dns_name1 != nastr:
742 t_repsFrom.dns_name1 = nastr
743 else:
744 if t_repsFrom.dns_name1 is None or \
745 t_repsFrom.dns_name2 is None or \
746 t_repsFrom.dns_name1 != nastr or \
747 t_repsFrom.dns_name2 != nastr:
748 t_repsFrom.dns_name1 = nastr
749 t_repsFrom.dns_name2 = nastr
751 else:
752 # XXX This entire branch is NEVER used! Because we don't do SMTP!
753 # (see the if condition above). Just close your eyes here.
754 if ((t_repsFrom.replica_flags &
755 drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0):
756 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP
758 # We have a transport type but it's not an
759 # object in the database
760 if cn_conn.transport_guid not in self.transport_table:
761 raise KCCError("Missing inter-site transport - (%s)" %
762 cn_conn.transport_dnstr)
764 x_transport = self.transport_table[str(cn_conn.transport_guid)]
766 if t_repsFrom.transport_guid != x_transport.guid:
767 t_repsFrom.transport_guid = x_transport.guid
769 # See (NOTE MS-TECH INCORRECT) above
770 if x_transport.address_attr == "dNSHostName":
772 if t_repsFrom.version == 0x1:
773 if t_repsFrom.dns_name1 is None or \
774 t_repsFrom.dns_name1 != nastr:
775 t_repsFrom.dns_name1 = nastr
776 else:
777 if t_repsFrom.dns_name1 is None or \
778 t_repsFrom.dns_name2 is None or \
779 t_repsFrom.dns_name1 != nastr or \
780 t_repsFrom.dns_name2 != nastr:
781 t_repsFrom.dns_name1 = nastr
782 t_repsFrom.dns_name2 = nastr
784 else:
785 # MS tech specification says we retrieve the named
786 # attribute in "transportAddressAttribute" from the parent of
787 # the DSA object
788 try:
789 pdnstr = s_dsa.get_parent_dnstr()
790 attrs = [x_transport.address_attr]
792 res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
793 attrs=attrs)
794 except ldb.LdbError, (enum, estr):
795 raise KCCError(
796 "Unable to find attr (%s) for (%s) - (%s)" %
797 (x_transport.address_attr, pdnstr, estr))
799 msg = res[0]
800 nastr = str(msg[x_transport.address_attr][0])
802 # See (NOTE MS-TECH INCORRECT) above
803 if t_repsFrom.version == 0x1:
804 if t_repsFrom.dns_name1 is None or \
805 t_repsFrom.dns_name1 != nastr:
806 t_repsFrom.dns_name1 = nastr
807 else:
808 if t_repsFrom.dns_name1 is None or \
809 t_repsFrom.dns_name2 is None or \
810 t_repsFrom.dns_name1 != nastr or \
811 t_repsFrom.dns_name2 != nastr:
813 t_repsFrom.dns_name1 = nastr
814 t_repsFrom.dns_name2 = nastr
816 if t_repsFrom.is_modified():
817 logger.debug("modify_repsFrom(): %s" % t_repsFrom)
819 def is_repsFrom_implied(self, n_rep, cn_conn):
820 """Given a NC replica and NTDS Connection, determine if the connection
821 implies a repsFrom tuple should be present from the source DSA listed
822 in the connection to the naming context
824 :param n_rep: NC replica
825 :param conn: NTDS Connection
826 ::returns (True || False), source DSA:
827 """
828 #XXX different conditions for "implies" than MS-ADTS 6.2.2
830 # NTDS Connection must satisfy all the following criteria
831 # to imply a repsFrom tuple is needed:
833 # cn!enabledConnection = true.
834 # cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
835 # cn!fromServer references an nTDSDSA object.
837 s_dsa = None
839 if cn_conn.is_enabled() and not cn_conn.is_rodc_topology():
841 s_dnstr = cn_conn.get_from_dnstr()
842 if s_dnstr is not None:
843 s_dsa = self.get_dsa(s_dnstr)
845 # No DSA matching this source DN string?
846 if s_dsa is None:
847 return False, None
849 # To imply a repsFrom tuple is needed, each of these
850 # must be True:
852 # An NC replica of the NC "is present" on the DC to
853 # which the nTDSDSA object referenced by cn!fromServer
854 # corresponds.
856 # An NC replica of the NC "should be present" on
857 # the local DC
858 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
860 if s_rep is None or not s_rep.is_present():
861 return False, None
863 # To imply a repsFrom tuple is needed, each of these
864 # must be True:
866 # The NC replica on the DC referenced by cn!fromServer is
867 # a writable replica or the NC replica that "should be
868 # present" on the local DC is a partial replica.
870 # The NC is not a domain NC, the NC replica that
871 # "should be present" on the local DC is a partial
872 # replica, cn!transportType has no value, or
873 # cn!transportType has an RDN of CN=IP.
875 implied = (not s_rep.is_ro() or n_rep.is_partial()) and \
876 (not n_rep.is_domain() or
877 n_rep.is_partial() or
878 cn_conn.transport_dnstr is None or
879 cn_conn.transport_dnstr.find("CN=IP") == 0)
881 if implied:
882 return True, s_dsa
883 else:
884 return False, None
886 def translate_ntdsconn(self):
887 """This function adjusts values of repsFrom abstract attributes of NC
888 replicas on the local DC to match those implied by
889 nTDSConnection objects.
890 [MS-ADTS] 6.2.2.5
891 """
892 if self.my_dsa.is_translate_ntdsconn_disabled():
893 logger.debug("skipping translate_ntdsconn() "
894 "because disabling flag is set")
895 return
897 logger.debug("translate_ntdsconn(): enter")
899 current_rep_table, needed_rep_table = self.my_dsa.get_rep_tables()
901 # Filled in with replicas we currently have that need deleting
902 delete_reps = set()
904 # We're using the MS notation names here to allow
905 # correlation back to the published algorithm.
907 # n_rep - NC replica (n)
908 # t_repsFrom - tuple (t) in n!repsFrom
909 # s_dsa - Source DSA of the replica. Defined as nTDSDSA
910 # object (s) such that (s!objectGUID = t.uuidDsa)
911 # In our IDL representation of repsFrom the (uuidDsa)
912 # attribute is called (source_dsa_obj_guid)
913 # cn_conn - (cn) is nTDSConnection object and child of the local
914 # DC's nTDSDSA object and (cn!fromServer = s)
915 # s_rep - source DSA replica of n
917 # If we have the replica and it's not needed
918 # then we add it to the "to be deleted" list.
919 for dnstr in current_rep_table:
920 if dnstr not in needed_rep_table:
921 delete_reps.add(dnstr)
923 DEBUG_FN('current %d needed %d delete %d' % (len(current_rep_table),
924 len(needed_rep_table), len(delete_reps)))
926 if delete_reps:
927 DEBUG('deleting these reps: %s' % delete_reps)
928 for dnstr in delete_reps:
929 del current_rep_table[dnstr]
931 # Now perform the scan of replicas we'll need
932 # and compare any current repsFrom against the
933 # connections
934 for n_rep in needed_rep_table.values():
936 # load any repsFrom and fsmo roles as we'll
937 # need them during connection translation
938 n_rep.load_repsFrom(self.samdb)
939 n_rep.load_fsmo_roles(self.samdb)
941 # Loop thru the existing repsFrom tuples (if any)
942 # XXX This is a list and could contain duplicates
943 # (multiple load_repsFrom calls)
944 for t_repsFrom in n_rep.rep_repsFrom:
946 # for each tuple t in n!repsFrom, let s be the nTDSDSA
947 # object such that s!objectGUID = t.uuidDsa
948 guidstr = str(t_repsFrom.source_dsa_obj_guid)
949 s_dsa = self.get_dsa_by_guidstr(guidstr)
951 # Source dsa is gone from config (strange)
952 # so cleanup stale repsFrom for unlisted DSA
953 if s_dsa is None:
954 logger.warning("repsFrom source DSA guid (%s) not found" %
955 guidstr)
956 t_repsFrom.to_be_deleted = True
957 continue
959 s_dnstr = s_dsa.dsa_dnstr
961 # Retrieve my DSAs connection object (if it exists)
962 # that specifies the fromServer equivalent to
963 # the DSA that is specified in the repsFrom source
964 cn_conn = self.my_dsa.get_connection_by_from_dnstr(s_dnstr)
966 # Let (cn) be the nTDSConnection object such that (cn)
967 # is a child of the local DC's nTDSDSA object and
968 # (cn!fromServer = s) and (cn!options) does not contain
969 # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.
971 # KCC removes this repsFrom tuple if any of the following
972 # is true:
973 # cn = NULL.
974 # [...]
976 #XXX varying possible interpretations of rodc_topology
977 if cn_conn is None or cn_conn.is_rodc_topology():
978 t_repsFrom.to_be_deleted = True
979 continue
981 # [...] KCC removes this repsFrom tuple if:
983 # No NC replica of the NC "is present" on DSA that
984 # would be source of replica
986 # A writable replica of the NC "should be present" on
987 # the local DC, but a partial replica "is present" on
988 # the source DSA
989 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
991 if s_rep is None or not s_rep.is_present() or \
992 (not n_rep.is_ro() and s_rep.is_partial()):
994 t_repsFrom.to_be_deleted = True
995 continue
997 # If the KCC did not remove t from n!repsFrom, it updates t
998 self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
1000 # Loop thru connections and add implied repsFrom tuples
1001 # for each NTDSConnection under our local DSA if the
1002 # repsFrom is not already present
1003 for cn_conn in self.my_dsa.connect_table.values():
1005 implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn)
1006 if not implied:
1007 continue
1009 # Loop thru the existing repsFrom tuples (if any) and
1010 # if we already have a tuple for this connection then
1011 # no need to proceed to add. It will have been changed
1012 # to have the correct attributes above
1013 for t_repsFrom in n_rep.rep_repsFrom:
1014 guidstr = str(t_repsFrom.source_dsa_obj_guid)
1015 #XXXX what?
1016 if s_dsa is self.get_dsa_by_guidstr(guidstr):
1017 s_dsa = None
1018 break
1020 if s_dsa is None:
1021 continue
1023 # Create a new RepsFromTo and proceed to modify
1024 # it according to specification
1025 t_repsFrom = RepsFromTo(n_rep.nc_dnstr)
1027 t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid
1029 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
1031 self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
1033 # Add to our NC repsFrom as this is newly computed
1034 if t_repsFrom.is_modified():
1035 n_rep.rep_repsFrom.append(t_repsFrom)
1037 if opts.readonly:
1038 # Display any to be deleted or modified repsFrom
1039 text = n_rep.dumpstr_to_be_deleted()
1040 if text:
1041 logger.info("TO BE DELETED:\n%s" % text)
1042 text = n_rep.dumpstr_to_be_modified()
1043 if text:
1044 logger.info("TO BE MODIFIED:\n%s" % text)
1046 # Perform deletion from our tables but perform
1047 # no database modification
1048 n_rep.commit_repsFrom(self.samdb, ro=True)
1049 else:
1050 # Commit any modified repsFrom to the NC replica
1051 n_rep.commit_repsFrom(self.samdb)
1053 def merge_failed_links(self):
1054 """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.
1055 The KCC on a writable DC attempts to merge the link and connection
1056 failure information from bridgehead DCs in its own site to help it
1057 identify failed bridgehead DCs.
1058 """
1059 # MS-TECH Ref 6.2.2.3.2 Merge of kCCFailedLinks and kCCFailedConnections
1060 # from Bridgeheads
1062 # 1. Queries every bridgehead server in your site (other than yourself)
1063 # 2. For every ntDSConnection that references a server in a different
1064 # site merge all the failure info
1066 # XXX - not implemented yet
1067 if opts.attempt_live_connections:
1068 DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")
1069 else:
1070 DEBUG_FN("skipping merge_failed_links() because it requires "
1071 "real network connections\n"
1072 "and we weren't asked to --attempt-live-connections")
1074 def setup_graph(self, part):
1075 """Set up a GRAPH, populated with a VERTEX for each site
1076 object, a MULTIEDGE for each siteLink object, and a
1077 # MULTIEDGESET for each siteLinkBridge object (or implied
1078 siteLinkBridge).
1080 ::returns: a new graph
1081 """
1082 guid_to_vertex = {}
1083 # Create graph
1084 g = IntersiteGraph()
1085 # Add vertices
1086 for site_guid, site in self.site_table.items():
1087 vertex = Vertex(site, part)
1088 vertex.guid = site_guid
1089 vertex.ndrpacked_guid = ndr_pack(site.site_guid)
1090 g.vertices.add(vertex)
1092 if not guid_to_vertex.get(site_guid):
1093 guid_to_vertex[site_guid] = []
1095 guid_to_vertex[site_guid].append(vertex)
1097 connected_vertices = set()
1098 for transport_guid, transport in self.transport_table.items():
1099 # Currently only ever "IP"
1100 if transport.name != 'IP':
1101 DEBUG_FN("setup_graph is ignoring transport %s" %
1102 transport.name)
1103 continue
1104 for site_link_dn, site_link in self.sitelink_table.items():
1105 new_edge = create_edge(transport_guid, site_link,
1106 guid_to_vertex)
1107 connected_vertices.update(new_edge.vertices)
1108 g.edges.add(new_edge)
1110 # If 'Bridge all site links' is enabled and Win2k3 bridges required
1111 # is not set
1112 # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
1113 # No documentation for this; however, ntdsapi.h appears to have:
1114 # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
1115 if (((self.my_site.site_options & 0x00000002) == 0
1116 and (self.my_site.site_options & 0x00001000) == 0)):
1117 g.edge_set.add(create_auto_edge_set(g, transport_guid))
1118 else:
1119 # TODO get all site link bridges
1120 for site_link_bridge in []:
1121 g.edge_set.add(create_edge_set(g, transport_guid,
1122 site_link_bridge))
1124 g.connected_vertices = connected_vertices
1126 #be less verbose in dot file output unless --debug
1127 do_dot_files = opts.dot_files and opts.debug
1128 dot_edges = []
1129 for edge in g.edges:
1130 for a, b in itertools.combinations(edge.vertices, 2):
1131 dot_edges.append((a.site.site_dnstr, b.site.site_dnstr))
1132 verify_properties = ()
1133 verify_and_dot('site_edges', dot_edges, directed=False,
1134 label=self.my_dsa_dnstr,
1135 properties=verify_properties, debug=DEBUG,
1136 verify=opts.verify,
1137 dot_files=do_dot_files)
1139 return g
1141 def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
1142 """Get a bridghead DC.
1144 :param site: site object representing for which a bridgehead
1145 DC is desired.
1146 :param part: crossRef for NC to replicate.
1147 :param transport: interSiteTransport object for replication
1148 traffic.
1149 :param partial_ok: True if a DC containing a partial
1150 replica or a full replica will suffice, False if only
1151 a full replica will suffice.
1152 :param detect_failed: True to detect failed DCs and route
1153 replication traffic around them, False to assume no DC
1154 has failed.
1155 ::returns: dsa object for the bridgehead DC or None
1156 """
1158 bhs = self.get_all_bridgeheads(site, part, transport,
1159 partial_ok, detect_failed)
1160 if len(bhs) == 0:
1161 DEBUG_MAGENTA("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" %
1162 site.site_dnstr)
1163 return None
1164 else:
1165 DEBUG_GREEN("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" %
1166 (site.site_dnstr, bhs[0].dsa_dnstr))
1167 return bhs[0]
1169 def get_all_bridgeheads(self, site, part, transport,
1170 partial_ok, detect_failed):
1171 """Get all bridghead DCs satisfying the given criteria
1173 :param site: site object representing the site for which
1174 bridgehead DCs are desired.
1175 :param part: partition for NC to replicate.
1176 :param transport: interSiteTransport object for
1177 replication traffic.
1178 :param partial_ok: True if a DC containing a partial
1179 replica or a full replica will suffice, False if
1180 only a full replica will suffice.
1181 :param detect_failed: True to detect failed DCs and route
1182 replication traffic around them, FALSE to assume
1183 no DC has failed.
1184 ::returns: list of dsa object for available bridgehead
1185 DCs or None
1186 """
1188 bhs = []
1190 logger.debug("get_all_bridgeheads: %s" % transport.name)
1191 if 'Site-5' in site.site_dnstr:
1192 DEBUG_RED("get_all_bridgeheads with %s, part%s, partial_ok %s"
1193 " detect_failed %s" % (site.site_dnstr, part.partstr,
1194 partial_ok, detect_failed))
1195 logger.debug(site.rw_dsa_table)
1196 for dsa in site.rw_dsa_table.values():
1198 pdnstr = dsa.get_parent_dnstr()
1200 # IF t!bridgeheadServerListBL has one or more values and
1201 # t!bridgeheadServerListBL does not contain a reference
1202 # to the parent object of dc then skip dc
1203 if ((len(transport.bridgehead_list) != 0 and
1204 pdnstr not in transport.bridgehead_list)):
1205 continue
1207 # IF dc is in the same site as the local DC
1208 # IF a replica of cr!nCName is not in the set of NC replicas
1209 # that "should be present" on dc or a partial replica of the
1210 # NC "should be present" but partialReplicasOkay = FALSE
1211 # Skip dc
1212 if self.my_site.same_site(dsa):
1213 needed, ro, partial = part.should_be_present(dsa)
1214 if not needed or (partial and not partial_ok):
1215 continue
1216 rep = dsa.get_current_replica(part.nc_dnstr)
1218 # ELSE
1219 # IF an NC replica of cr!nCName is not in the set of NC
1220 # replicas that "are present" on dc or a partial replica of
1221 # the NC "is present" but partialReplicasOkay = FALSE
1222 # Skip dc
1223 else:
1224 rep = dsa.get_current_replica(part.nc_dnstr)
1225 if rep is None or (rep.is_partial() and not partial_ok):
1226 continue
1228 # IF AmIRODC() and cr!nCName corresponds to default NC then
1229 # Let dsaobj be the nTDSDSA object of the dc
1230 # IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
1231 # Skip dc
1232 if self.my_dsa.is_ro() and rep is not None and rep.is_default():
1233 if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
1234 continue
1236 # IF t!name != "IP" and the parent object of dc has no value for
1237 # the attribute specified by t!transportAddressAttribute
1238 # Skip dc
1239 if transport.name != "IP":
1240 # MS tech specification says we retrieve the named
1241 # attribute in "transportAddressAttribute" from the parent
1242 # of the DSA object
1243 try:
1244 attrs = [transport.address_attr]
1246 res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
1247 attrs=attrs)
1248 except ldb.LdbError, (enum, estr):
1249 continue
1251 msg = res[0]
1252 if transport.address_attr not in msg:
1253 continue
1255 nastr = str(msg[transport.address_attr][0])
1257 # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
1258 # Skip dc
1259 if self.is_bridgehead_failed(dsa, detect_failed):
1260 DEBUG("bridgehead is failed")
1261 continue
1263 logger.debug("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr)
1264 bhs.append(dsa)
1266 # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
1267 # s!options
1268 # SORT bhs such that all GC servers precede DCs that are not GC
1269 # servers, and otherwise by ascending objectGUID
1270 # ELSE
1271 # SORT bhs in a random order
1272 if site.is_random_bridgehead_disabled():
1273 bhs.sort(sort_dsa_by_gc_and_guid)
1274 else:
1275 random.shuffle(bhs)
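# Note: this shuffle uses Python's module-level random state, so bridgehead
# ordering is only reproducible across runs if that state is seeded
# deterministically beforehand (e.g., hypothetically, random.seed(0) in a
# test harness).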
1276 DEBUG_YELLOW(bhs)
1277 return bhs
1279 def is_bridgehead_failed(self, dsa, detect_failed):
1280 """Determine whether a given DC is known to be in a failed state
1281 ::returns: True if and only if the DC should be considered failed
1283 Here we DEPART from the pseudo code spec which appears to be
1284 wrong. It says, in full:
1286 /***** BridgeheadDCFailed *****/
1287 /* Determine whether a given DC is known to be in a failed state.
1288 * IN: objectGUID - objectGUID of the DC's nTDSDSA object.
1289 * IN: detectFailedDCs - TRUE if and only failed DC detection is
1290 * enabled.
1291 * RETURNS: TRUE if and only if the DC should be considered to be in a
1292 * failed state.
1294 BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool
1296 IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in
1297 the options attribute of the site settings object for the local
1298 DC's site
1299 RETURN FALSE
1300 ELSEIF a tuple z exists in the kCCFailedLinks or
1301 kCCFailedConnections variables such that z.UUIDDsa =
1302 objectGUID, z.FailureCount > 1, and the current time -
1303 z.TimeFirstFailure > 2 hours
1304 RETURN TRUE
1305 ELSE
1306 RETURN detectFailedDCs
1307 ENDIF
1310 where you will see detectFailedDCs is not behaving as
1311 advertised -- it is acting as a default return code in the
1312 event that a failure is not detected, not a switch turning
1313 detection on or off. Elsewhere the documentation seems to
1314 concur with the comment rather than the code.
1315 """
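# In summary, this implementation decides:
#   detect_failed is False                             -> not failed
#   site option DETECT_STALE_DISABLED (0x00000008) set -> not failed
#   otherwise                                          -> is_stale_link_connection(dsa)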
1316 if not detect_failed:
1317 return False
1319 # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
1320 # When DETECT_STALE_DISABLED, we can never know if
1321 # it's in a failed state
1322 if self.my_site.site_options & 0x00000008:
1323 return False
1325 return self.is_stale_link_connection(dsa)
1327 def create_connection(self, part, rbh, rsite, transport,
1328 lbh, lsite, link_opt, link_sched,
1329 partial_ok, detect_failed):
1330 """Create an nTDSConnection object with the given parameters
1331 if one does not already exist.
1333 :param part: crossRef object for the NC to replicate.
1334 :param rbh: nTDSDSA object for DC to act as the
1335 IDL_DRSGetNCChanges server (which is in a site other
1336 than the local DC's site).
1337 :param rsite: site of the rbh
1338 :param transport: interSiteTransport object for the transport
1339 to use for replication traffic.
1340 :param lbh: nTDSDSA object for DC to act as the
1341 IDL_DRSGetNCChanges client (which is in the local DC's site).
1342 :param lsite: site of the lbh
1343 :param link_opt: Replication parameters (aggregated siteLink options,
1344 etc.)
1345 :param link_sched: Schedule specifying the times at which
1346 to begin replicating.
1347 :partial_ok: True if bridgehead DCs containing partial
1348 replicas of the NC are acceptable.
1349 :param detect_failed: True to detect failed DCs and route
1350 replication traffic around them, FALSE to assume no DC
1351 has failed.
1352 """
1353 rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
1354 partial_ok, False)
1355 rbh_table = {x.dsa_dnstr: x for x in rbhs_all}
1357 DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all),
1358 [x.dsa_dnstr for x in rbhs_all]))
1360 # MS-TECH says to compute rbhs_avail but then doesn't use it
1361 # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
1362 # partial_ok, detect_failed)
1364 lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
1365 partial_ok, False)
1366 if lbh.is_ro():
1367 lbhs_all.append(lbh)
1369 DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all),
1370 [x.dsa_dnstr for x in lbhs_all]))
1372 # MS-TECH says to compute lbhs_avail but then doesn't use it
1373 # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
1374 # partial_ok, detect_failed)
1376 # FOR each nTDSConnection object cn such that the parent of cn is
1377 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1378 for ldsa in lbhs_all:
1379 for cn in ldsa.connect_table.values():
1381 rdsa = rbh_table.get(cn.from_dnstr)
1382 if rdsa is None:
1383 continue
1385 DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr)
1386 # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
1387 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
1388 # cn!transportType references t
1389 if ((cn.is_generated() and
1390 not cn.is_rodc_topology() and
1391 cn.transport_guid == transport.guid)):
1393 # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
1394 # cn!options and cn!schedule != sch
1395 # Perform an originating update to set cn!schedule to
1396 # sched
1397 if ((not cn.is_user_owned_schedule() and
1398 not cn.is_equivalent_schedule(link_sched))):
1399 cn.schedule = link_sched
1400 cn.set_modified(True)
1402 # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1403 # NTDSCONN_OPT_USE_NOTIFY are set in cn
1404 if cn.is_override_notify_default() and \
1405 cn.is_use_notify():
1407 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
1408 # ri.Options
1409 # Perform an originating update to clear bits
1410 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1411 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1412 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
1413 cn.options &= \
1414 ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1415 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1416 cn.set_modified(True)
1418 # ELSE
1419 else:
1421 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
1422 # ri.Options
1423 # Perform an originating update to set bits
1424 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1425 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1426 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1427 cn.options |= \
1428 (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1429 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1430 cn.set_modified(True)
1432 # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
1433 if cn.is_twoway_sync():
1435 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
1436 # ri.Options
1437 # Perform an originating update to clear bit
1438 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1439 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
1440 cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1441 cn.set_modified(True)
1443 # ELSE
1444 else:
1446 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
1447 # ri.Options
1448 # Perform an originating update to set bit
1449 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1450 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1451 cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1452 cn.set_modified(True)
1454 # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
1455 # in cn!options
1456 if cn.is_intersite_compression_disabled():
1458 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
1459 # in ri.Options
1460 # Perform an originating update to clear bit
1461 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1462 # cn!options
1463 if ((link_opt &
1464 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0):
1465 cn.options &= \
1466 ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1467 cn.set_modified(True)
1469 # ELSE
1470 else:
1471 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1472 # ri.Options
1473 # Perform an originating update to set bit
1474 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1475 # cn!options
1476 if ((link_opt &
1477 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
1478 cn.options |= \
1479 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1480 cn.set_modified(True)
1482 # Display any modified connection
1483 if opts.readonly:
1484 if cn.to_be_modified:
1485 logger.info("TO BE MODIFIED:\n%s" % cn)
1487 ldsa.commit_connections(self.samdb, ro=True)
1488 else:
1489 ldsa.commit_connections(self.samdb)
1490 # ENDFOR
1492 valid_connections = 0
1494 # FOR each nTDSConnection object cn such that cn!parent is
1495 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1496 for ldsa in lbhs_all:
1497 for cn in ldsa.connect_table.values():
1499 rdsa = rbh_table.get(cn.from_dnstr)
1500 if rdsa is None:
1501 continue
1503 DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr)
1505 # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
1506 # cn!transportType references t) and
1507 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
1508 if (((not cn.is_generated() or
1509 cn.transport_guid == transport.guid) and
1510 not cn.is_rodc_topology())):
1512 # LET rguid be the objectGUID of the nTDSDSA object
1513 # referenced by cn!fromServer
1514 # LET lguid be (cn!parent)!objectGUID
1516 # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
1517 # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
1518 # Increment cValidConnections by 1
1519 if ((not self.is_bridgehead_failed(rdsa, detect_failed) and
1520 not self.is_bridgehead_failed(ldsa, detect_failed))):
1521 valid_connections += 1
1523 # IF keepConnections does not contain cn!objectGUID
1524 # APPEND cn!objectGUID to keepConnections
1525 self.kept_connections.add(cn)
1527 # ENDFOR
1528 DEBUG_RED("valid connections %d" % valid_connections)
1529 DEBUG("kept_connections:\n%s" % (self.kept_connections,))
1530 # IF cValidConnections = 0
1531 if valid_connections == 0:
1533 # LET opt be NTDSCONN_OPT_IS_GENERATED
1534 opt = dsdb.NTDSCONN_OPT_IS_GENERATED
1536 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
1537 # SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1538 # NTDSCONN_OPT_USE_NOTIFY in opt
1539 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1540 opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1541 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1543 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
1544 # SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
1545 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1546 opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1548 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1549 # ri.Options
1550 # SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
1551 if ((link_opt &
1552 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
1553 opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1555 # Perform an originating update to create a new nTDSConnection
1556 # object cn that is a child of lbh, cn!enabledConnection = TRUE,
1557 # cn!options = opt, cn!transportType is a reference to t,
1558 # cn!fromServer is a reference to rbh, and cn!schedule = sch
1559 cn = lbh.new_connection(opt, 0, transport,
1560 rbh.dsa_dnstr, link_sched)
1562 # Display any added connection
1563 if opts.readonly:
1564 if cn.to_be_added:
1565 logger.info("TO BE ADDED:\n%s" % cn)
1567 lbh.commit_connections(self.samdb, ro=True)
1568 else:
1569 lbh.commit_connections(self.samdb)
1571 # APPEND cn!objectGUID to keepConnections
1572 self.kept_connections.add(cn)
1574 def add_transports(self, vertex, local_vertex, graph, detect_failed):
1576 # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
1577 # here, but using vertex seems to make more sense. That is,
1578 # the docs want this:
1580 #bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1581 # local_vertex.is_black(), detect_failed)
1583 # TODO WHY?????
1585 vertex.accept_red_red = []
1586 vertex.accept_black = []
1587 found_failed = False
1588 for t_guid, transport in self.transport_table.items():
1589 if transport.name != 'IP':
1590 #XXX well this is cheating a bit
1591 logging.warning("WARNING: we are ignoring a transport named %r"
1592 % transport.name)
1593 continue
1595 # FLAG_CR_NTDS_DOMAIN 0x00000002
1596 if ((vertex.is_red() and transport.name != "IP" and
1597 vertex.part.system_flags & 0x00000002)):
1598 continue
1600 if vertex not in graph.connected_vertices:
1601 continue
1603 partial_replica_okay = vertex.is_black()
1604 bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1605 partial_replica_okay, detect_failed)
1606 if bh is None:
1607 found_failed = True
1608 continue
1610 vertex.accept_red_red.append(t_guid)
1611 vertex.accept_black.append(t_guid)
1613 # Add additional transport to allow another run of Dijkstra
1614 vertex.accept_red_red.append("EDGE_TYPE_ALL")
1615 vertex.accept_black.append("EDGE_TYPE_ALL")
1617 return found_failed
1619 def create_connections(self, graph, part, detect_failed):
1620 """Construct an NC replica graph for the NC identified by
1621 the given crossRef, then create any additional nTDSConnection
1622 objects required.
1624 :param graph: site graph.
1625 :param part: crossRef object for NC.
1626 :param detect_failed: True to detect failed DCs and route
1627 replication traffic around them, False to assume no DC
1628 has failed.
1630 Modifies self.kept_connections by adding any connections
1631 deemed to be "in use".
1633 ::returns: (all_connected, found_failed_dc)
1634 (all_connected) True if the resulting NC replica graph
1635 connects all sites that need to be connected.
1636 (found_failed_dc) True if one or more failed DCs were
1637 detected.
1638 """
1639 all_connected = True
1640 found_failed = False
1642 logger.debug("create_connections(): enter\n"
1643 "\tpartdn=%s\n\tdetect_failed=%s" %
1644 (part.nc_dnstr, detect_failed))
1646 # XXX - This is a highly abbreviated function from the MS-TECH
1647 # ref. It creates connections between bridgeheads to all
1648 # sites that have appropriate replicas. Thus we are not
1649 # creating a minimum cost spanning tree but instead
1650 # producing a fully connected tree. This should produce
1651 # a full (albeit not optimal cost) replication topology.
1653 my_vertex = Vertex(self.my_site, part)
1654 my_vertex.color_vertex()
1656 for v in graph.vertices:
1657 v.color_vertex()
1658 if self.add_transports(v, my_vertex, graph, False):
1659 found_failed = True
1661 # No NC replicas for this NC in the site of the local DC,
1662 # so no nTDSConnection objects need be created
1663 if my_vertex.is_white():
1664 return all_connected, found_failed
1666 edge_list, n_components = get_spanning_tree_edges(graph,
1667 self.my_site,
1668 label=part.partstr)
1670 logger.debug("%s Number of components: %d" %
1671 (part.nc_dnstr, n_components))
1672 if n_components > 1:
1673 all_connected = False
1675 # LET partialReplicaOkay be TRUE if and only if
1676 # localSiteVertex.Color = COLOR.BLACK
1677 partial_ok = my_vertex.is_black()
1679 # Utilize the IP transport only for now
1680 transport = self.ip_transport
1682 DEBUG("edge_list %s" % edge_list)
1683 for e in edge_list:
1684 # XXX more accurate comparison?
1685 if e.directed and e.vertices[0].site is self.my_site:
1686 continue
1688 if e.vertices[0].site is self.my_site:
1689 rsite = e.vertices[1].site
1690 else:
1691 rsite = e.vertices[0].site
1693 # We don't make connections to our own site as that
1694 # is the intra-site topology generator's job
1695 if rsite is self.my_site:
1696 DEBUG("rsite is my_site")
1697 continue
1699 # Determine bridgehead server in remote site
1700 rbh = self.get_bridgehead(rsite, part, transport,
1701 partial_ok, detect_failed)
1702 if rbh is None:
1703 continue
1705 # RODC acts as a BH for itself
1706 # IF AmIRODC() then
1707 # LET lbh be the nTDSDSA object of the local DC
1708 # ELSE
1709 # LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
1710 # cr, t, partialReplicaOkay, detectFailedDCs)
1711 if self.my_dsa.is_ro():
1712 lsite = self.my_site
1713 lbh = self.my_dsa
1714 else:
1715 lsite = self.my_site
1716 lbh = self.get_bridgehead(lsite, part, transport,
1717 partial_ok, detect_failed)
1718 # TODO
1719 if lbh is None:
1720 DEBUG_RED("DISASTER! lbh is None")
1721 return False, True
1723 DEBUG_CYAN("SITES")
1724 print lsite, rsite
1725 DEBUG_BLUE("vertices")
1726 print e.vertices
1727 DEBUG_BLUE("bridgeheads")
1728 print lbh, rbh
1729 DEBUG_BLUE("-" * 70)
1731 sitelink = e.site_link
1732 if sitelink is None:
1733 link_opt = 0x0
1734 link_sched = None
1735 else:
1736 link_opt = sitelink.options
1737 link_sched = sitelink.schedule
1739 self.create_connection(part, rbh, rsite, transport,
1740 lbh, lsite, link_opt, link_sched,
1741 partial_ok, detect_failed)
1743 return all_connected, found_failed
1745 def create_intersite_connections(self):
1746 """Computes an NC replica graph for each NC replica that "should be
1747 present" on the local DC or "is present" on any DC in the same site
1748 as the local DC. For each edge directed to an NC replica on such a
1749 DC from an NC replica on a DC in another site, the KCC creates an
1750 nTDSConnection object to imply that edge if one does not already
1751 exist.
1753 Modifies self.kept_connections - A set of nTDSConnection
1754 objects for edges that are directed
1755 to the local DC's site in one or more NC replica graphs.
1757 returns: True if spanning trees were created for all NC replica
1758 graphs, otherwise False.
1759 """
1760 all_connected = True
1761 self.kept_connections = set()
1763 # LET crossRefList be the set containing each object o of class
1764 # crossRef such that o is a child of the CN=Partitions child of the
1765 # config NC
1767 # FOR each crossRef object cr in crossRefList
1768 # IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
1769 # is clear in cr!systemFlags, skip cr.
1770 # LET g be the GRAPH return of SetupGraph()
1772 for part in self.part_table.values():
1774 if not part.is_enabled():
1775 continue
1777 if part.is_foreign():
1778 continue
1780 graph = self.setup_graph(part)
1782 # Create nTDSConnection objects, routing replication traffic
1783 # around "failed" DCs.
1784 found_failed = False
1786 connected, found_failed = self.create_connections(graph,
1787 part, True)
1789 DEBUG("with detect_failed: connected %s Found failed %s" %
1790 (connected, found_failed))
1791 if not connected:
1792 all_connected = False
1794 if found_failed:
1795 # One or more failed DCs preclude use of the ideal NC
1796 # replica graph. Add connections for the ideal graph.
1797 self.create_connections(graph, part, False)
1799 return all_connected
1802 def intersite(self):
1803 """The head method for generating the inter-site KCC replica
1804 connection graph and attendant nTDSConnection objects
1805 in the samdb.
1807 Produces self.kept_connections set of NTDS Connections
1808 that should be kept during subsequent pruning process.
1810 ::return (True or False): (True) if the produced NC replica
1811 graph connects all sites that need to be connected
1812 """
1814 # Retrieve my DSA
1815 mydsa = self.my_dsa
1816 mysite = self.my_site
1817 all_connected = True
1819 logger.debug("intersite(): enter")
1821 # Determine who is the ISTG
1822 if opts.readonly:
1823 mysite.select_istg(self.samdb, mydsa, ro=True)
1824 else:
1825 mysite.select_istg(self.samdb, mydsa, ro=False)
1827 # Test whether local site has topology disabled
1828 if mysite.is_intersite_topology_disabled():
1829 logger.debug("intersite(): exit disabled all_connected=%d" %
1830 all_connected)
1831 return all_connected
1833 if not mydsa.is_istg():
1834 logger.debug("intersite(): exit not istg all_connected=%d" %
1835 all_connected)
1836 return all_connected
1838 self.merge_failed_links()
1840 # For each NC with an NC replica that "should be present" on the
1841 # local DC or "is present" on any DC in the same site as the
1842 # local DC, the KCC constructs a site graph--a precursor to an NC
1843 # replica graph. The site connectivity for a site graph is defined
1844 # by objects of class interSiteTransport, siteLink, and
1845 # siteLinkBridge in the config NC.
1847 all_connected = self.create_intersite_connections()
1849 logger.debug("intersite(): exit all_connected=%d" % all_connected)
1850 return all_connected
1852 def update_rodc_connection(self):
1853 """Runs when the local DC is an RODC and updates the RODC NTFRS
1854 connection object.
1855 """
1856 # Given an nTDSConnection object cn1, such that cn1.options contains
1857 # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2,
1858 # does not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure
1859 # that the following is true:
1861 # cn1.fromServer = cn2.fromServer
1862 # cn1.schedule = cn2.schedule
1864 # If no such cn2 can be found, cn1 is not modified.
1865 # If no such cn1 can be found, nothing is modified by this task.
1867 if not self.my_dsa.is_ro():
1868 return
1870 all_connections = self.my_dsa.connect_table.values()
1871 ro_connections = [x for x in all_connections if x.is_rodc_topology()]
1872 rw_connections = [x for x in all_connections
1873 if x not in ro_connections]
1875 # XXX here we are dealing with multiple RODC_TOPO connections,
1876 # if they exist. It is not clear whether the spec means that
1877 # or if it ever arises.
1878 if rw_connections and ro_connections:
1879 for con in ro_connections:
1880 cn2 = rw_connections[0]
1881 con.from_dnstr = cn2.from_dnstr
1882 con.schedule = cn2.schedule
1883 con.to_be_modified = True
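# Worked example (hypothetical connections): an RODC with one
# RODC_TOPOLOGY connection cn1 and one ordinary connection cn2 ends up
# with cn1.from_dnstr and cn1.schedule copied from cn2 and cn1 marked
# to_be_modified; if there is no ordinary connection, nothing changes.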
1885 self.my_dsa.commit_connections(self.samdb, ro=opts.readonly)
1887 def intrasite_max_node_edges(self, node_count):
1888 """Returns the maximum number of edges directed to a node in
1889 the intrasite replica graph.
1891 The KCC does not create more
1892 than 50 edges directed to a single DC. To optimize replication,
1893 we compute that each node should have n+2 total edges directed
1894 to it such that (n) is the smallest non-negative integer
1895 satisfying (node_count <= 2*(n*n) + 6*n + 7)
1897 (If the number of edges is m (i.e. n + 2), that is the same as
1898 2 * m*m - 2 * m + 3).
1900 edges n nodecount
1901 2 0 7
1902 3 1 15
1903 4 2 27
1904 5 3 43
1906 50 48 4903
1908 :param node_count: total number of nodes in the replica graph
1909 """
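# Worked example (illustrative only): for node_count=30 the smallest n
# with 2*n*n + 6*n + 7 >= 30 is n=3 (43), so up to n + 2 = 5 edges may
# be directed at a node; for node_count=5000 the computed value would be
# 51, so the cap of 50 applies.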
1910 n = 0
1911 while True:
1912 if node_count <= (2 * (n * n) + (6 * n) + 7):
1913 break
1914 n = n + 1
1915 n = n + 2
1916 if n < 50:
1917 return n
1918 return 50
1920 def construct_intrasite_graph(self, site_local, dc_local,
1921 nc_x, gc_only, detect_stale):
1922 # [MS-ADTS] 6.2.2.2
1923 # We're using the MS notation names here to allow
1924 # correlation back to the published algorithm.
1926 # nc_x - naming context (x) that we are testing if it
1927 # "should be present" on the local DC
1928 # f_of_x - replica (f) found on a DC (s) for NC (x)
1929 # dc_s - DC where f_of_x replica was found
1930 # dc_local - local DC that potentially needs a replica
1931 # (f_of_x)
1932 # r_list - replica list R
1933 # p_of_x - replica (p) is partial and found on a DC (s)
1934 # for NC (x)
1935 # l_of_x - replica (l) is the local replica for NC (x)
1936 # that should appear on the local DC
1937 # r_len - length of replica list |R|
1939 # If the DSA doesn't need a replica for this
1940 # partition (NC x) then continue
1941 needed, ro, partial = nc_x.should_be_present(dc_local)
1943 DEBUG_YELLOW("construct_intrasite_graph(): enter" +
1944 "\n\tgc_only=%d" % gc_only +
1945 "\n\tdetect_stale=%d" % detect_stale +
1946 "\n\tneeded=%s" % needed +
1947 "\n\tro=%s" % ro +
1948 "\n\tpartial=%s" % partial +
1949 "\n%s" % nc_x)
1951 if not needed:
1952 DEBUG_RED("%s lacks 'should be present' status, "
1953 "aborting construct_intersite_graph!" %
1954 nc_x.nc_dnstr)
1955 return
1957 # Create a NCReplica that matches what the local replica
1958 # should say. We'll use this below in our r_list
1959 l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid,
1960 nc_x.nc_dnstr)
1962 l_of_x.identify_by_basedn(self.samdb)
1964 l_of_x.rep_partial = partial
1965 l_of_x.rep_ro = ro
1967 # Add this replica that "should be present" to the
1968 # needed replica table for this DSA
1969 dc_local.add_needed_replica(l_of_x)
1971 # Replica list
1973 # Let R be a sequence containing each writable replica f of x
1974 # such that f "is present" on a DC s satisfying the following
1975 # criteria:
1977 # * s is a writable DC other than the local DC.
1979 # * s is in the same site as the local DC.
1981 # * If x is a read-only full replica and x is a domain NC,
1982 # then the DC's functional level is at least
1983 # DS_BEHAVIOR_WIN2008.
1985 # * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
1986 # in the options attribute of the site settings object for
1987 # the local DC's site, or no tuple z exists in the
1988 # kCCFailedLinks or kCCFailedConnections variables such
1989 # that z.UUIDDsa is the objectGUID of the nTDSDSA object
1990 # for s, z.FailureCount > 0, and the current time -
1991 # z.TimeFirstFailure > 2 hours.
1993 r_list = []
1995 # We'll loop thru all the DSAs looking for
1996 # writeable NC replicas that match the naming
1997 # context dn for (nc_x)
1999 for dc_s in self.my_site.dsa_table.values():
2000 # If this partition (nc_x) doesn't appear as a
2001 # replica (f_of_x) on (dc_s) then continue
2002 if nc_x.nc_dnstr not in dc_s.current_rep_table:
2003 continue
2005 # Pull out the NCReplica (f) of (x) with the dn
2006 # that matches NC (x) we are examining.
2007 f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2009 # Replica (f) of NC (x) must be writable
2010 if f_of_x.is_ro():
2011 continue
2013 # Replica (f) of NC (x) must satisfy the
2014 # "is present" criteria for DC (s) that
2015 # it was found on
2016 if not f_of_x.is_present():
2017 continue
2019 # DC (s) must be a writable DSA other than
2020 # my local DC. In other words we'd only replicate
2021 # from another writable DC
2022 if dc_s.is_ro() or dc_s is dc_local:
2023 continue
2025 # Certain replica graphs are produced only
2026 # for global catalogs, so test against
2027 # method input parameter
2028 if gc_only and not dc_s.is_gc():
2029 continue
2031 # DC (s) must be in the same site as the local DC
2032 # as this is the intra-site algorithm. This is
2033 # handled by virtue of placing DSAs in per
2034 # site objects (see enclosing for() loop)
2036 # If NC (x) is intended to be read-only full replica
2037 # for a domain NC on the target DC then the source
2038 # DC should have functional level at minimum WIN2008
2040 # Effectively we're saying that in order to replicate
2041 # to a targeted RODC (which was introduced in Windows 2008)
2042 # then we have to replicate from a DC that is also minimally
2043 # at that level.
2045 # You can also see this requirement in the MS special
2046 # considerations for RODC which state that to deploy
2047 # an RODC, at least one writable domain controller in
2048 # the domain must be running Windows Server 2008
2049 if ro and not partial and nc_x.nc_type == NCType.domain:
2050 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
2051 continue
2053 # If we haven't been told to turn off stale connection
2054 # detection and this dsa has a stale connection then
2055 # continue
2056 if detect_stale and self.is_stale_link_connection(dc_s):
2057 continue
2059 # Replica meets criteria. Add it to table indexed
2060 # by the GUID of the DC that it appears on
2061 r_list.append(f_of_x)
2063 # If a partial (not full) replica of NC (x) "should be present"
2064 # on the local DC, append to R each partial replica (p of x)
2065 # such that p "is present" on a DC satisfying the same
2066 # criteria defined above for full replica DCs.
2068 # XXX This loop and the previous one differ only in whether
2069 # the replica is partial or not. Here we only accept partial
2070 # (because we're partial); before we only accepted full. Order
2071 # doesn't matter (the list is sorted a few lines down) so these
2072 # loops could easily be merged. Or this could be a helper
2073 # function.
2075 if partial:
2076 # Now we loop thru all the DSAs looking for
2077 # partial NC replicas that match the naming
2078 # context dn for (NC x)
2079 for dc_s in self.my_site.dsa_table.values():
2081 # If this partition NC (x) doesn't appear as a
2082 # replica (p) of NC (x) on the dsa DC (s) then
2083 # continue
2084 if nc_x.nc_dnstr not in dc_s.current_rep_table:
2085 continue
2087 # Pull out the NCReplica with the dn that
2088 # matches NC (x) we are examining.
2089 p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2091 # Replica (p) of NC (x) must be partial
2092 if not p_of_x.is_partial():
2093 continue
2095 # Replica (p) of NC (x) must satisfy the
2096 # "is present" criteria for DC (s) that
2097 # it was found on
2098 if not p_of_x.is_present():
2099 continue
2101 # DC (s) must be a writable DSA other than
2102 # my DSA. In other words we'd only replicate
2103 # from another writable DSA
2104 if dc_s.is_ro() or dc_s is dc_local:
2105 continue
2107 # Certain replica graphs are produced only
2108 # for global catalogs, so test against
2109 # method input parameter
2110 if gc_only and not dc_s.is_gc():
2111 continue
2113 # If we haven't been told to turn off stale connection
2114 # detection and this dsa has a stale connection then
2115 # continue
2116 if detect_stale and self.is_stale_link_connection(dc_s):
2117 continue
2119 # Replica meets criteria. Add it to table indexed
2120 # by the GUID of the DSA that it appears on
2121 r_list.append(p_of_x)
2123 # Append to R the NC replica that "should be present"
2124 # on the local DC
2125 r_list.append(l_of_x)
2127 r_list.sort(sort_replica_by_dsa_guid)
2128 r_len = len(r_list)
2130 max_node_edges = self.intrasite_max_node_edges(r_len)
2132 # Add a node for each r_list element to the replica graph
2133 graph_list = []
2134 for rep in r_list:
2135 node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
2136 graph_list.append(node)
2138 # For each r(i) from (0 <= i < |R|-1)
2139 i = 0
2140 while i < (r_len-1):
2141 # Add an edge from r(i) to r(i+1) if r(i) is a full
2142 # replica or r(i+1) is a partial replica
2143 if not r_list[i].is_partial() or r_list[i+1].is_partial():
2144 graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr)
2146 # Add an edge from r(i+1) to r(i) if r(i+1) is a full
2147 # replica or r(i) is a partial replica.
2148 if not r_list[i+1].is_partial() or r_list[i].is_partial():
2149 graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr)
2150 i = i + 1
2152 # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
2153 # or r0 is a partial replica.
2154 if not r_list[r_len-1].is_partial() or r_list[0].is_partial():
2155 graph_list[0].add_edge_from(r_list[r_len-1].rep_dsa_dnstr)
2157 # Add an edge from r0 to r|R|-1 if r0 is a full replica or
2158 # r|R|-1 is a partial replica.
2159 if not r_list[0].is_partial() or r_list[r_len-1].is_partial():
2160 graph_list[r_len-1].add_edge_from(r_list[0].rep_dsa_dnstr)
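# Sketch of the result (illustrative): with three full replicas r0, r1,
# r2 the loop and the wrap-around checks above add edges in both
# directions for the pairs (r0,r1), (r1,r2) and (r2,r0), i.e. a directed
# double ring, which is what the 'directed_double_ring' check below
# verifies.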
2162 DEBUG("r_list is length %s" % len(r_list))
2163 DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr))
2164 for x in r_list))
2166 do_dot_files = opts.dot_files and opts.debug
2167 if opts.verify or do_dot_files:
2168 dot_edges = []
2169 dot_vertices = set()
2170 for v1 in graph_list:
2171 dot_vertices.add(v1.dsa_dnstr)
2172 for v2 in v1.edge_from:
2173 dot_edges.append((v2, v1.dsa_dnstr))
2174 dot_vertices.add(v2)
2176 verify_properties = ('connected', 'directed_double_ring')
2177 verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices,
2178 label='%s__%s__%s' % (site_local.site_dnstr,
2179 nctype_lut[nc_x.nc_type],
2180 nc_x.nc_dnstr),
2181 properties=verify_properties, debug=DEBUG,
2182 verify=opts.verify,
2183 dot_files=do_dot_files, directed=True)
2185 # For each existing nTDSConnection object implying an edge
2186 # from rj of R to ri such that j != i, an edge from rj to ri
2187 # is not already in the graph, and the total edges directed
2188 # to ri is less than n+2, the KCC adds that edge to the graph.
2189 for vertex in graph_list:
2190 dsa = self.my_site.dsa_table[vertex.dsa_dnstr]
2191 for connect in dsa.connect_table.values():
2192 remote = connect.from_dnstr
2193 if remote in self.my_site.dsa_table:
2194 vertex.add_edge_from(remote)
2196 DEBUG('reps are: %s' % ' '.join(x.rep_dsa_dnstr for x in r_list))
2197 DEBUG('dsas are: %s' % ' '.join(x.dsa_dnstr for x in graph_list))
2199 for tnode in graph_list:
2200 # To optimize replication latency in sites with many NC
2201 # replicas, the KCC adds new edges directed to ri to bring
2202 # the total edges to n+2, where the NC replica rk of R
2203 # from which the edge is directed is chosen at random such
2204 # that k != i and an edge from rk to ri is not already in
2205 # the graph.
2207 # Note that the KCC tech ref does not give a number for
2208 # the definition of "sites with many NC replicas". At a
2209 # bare minimum to satisfy n+2 edges directed at a node we
2210 # have to have at least three replicas in |R| (i.e. if n
2211 # is zero then at least replicas from two other graph
2212 # nodes may direct edges to us).
2213 if r_len >= 3 and not tnode.has_sufficient_edges():
2214 candidates = [x for x in graph_list if
2215 (x is not tnode and
2216 x.dsa_dnstr not in tnode.edge_from)]
2218 DEBUG_BLUE("looking for random link for %s. r_len %d, "
2219 "graph len %d candidates %d"
2220 % (tnode.dsa_dnstr, r_len, len(graph_list),
2221 len(candidates)))
2223 DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates])
2225 while candidates and not tnode.has_sufficient_edges():
2226 other = random.choice(candidates)
2227 DEBUG("trying to add candidate %s" % other.dsa_dstr)
2228 if not tnode.add_edge_from(other):
2229 DEBUG_RED("could not add %s" % other.dsa_dnstr)
2230 candidates.remove(other)
2231 else:
2232 DEBUG_CYAN("not adding links to %s: nodes %s, links is %s/%s" %
2233 (tnode.dsa_dnstr, r_len, len(tnode.edge_from),
2234 tnode.max_edges))
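# Sketch of the loop above (DSA names hypothetical): if tnode still
# needs two more incoming edges and candidates holds {A, B, C}, random
# picks are tried until tnode.has_sufficient_edges() is true or the
# candidate list is exhausted; failed picks are logged in red above.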
2236 # Print the graph node in debug mode
2237 logger.debug("%s" % tnode)
2239 # For each edge directed to the local DC, ensure a nTDSConnection
2240 # points to us that satisfies the KCC criteria
2242 if tnode.dsa_dnstr == dc_local.dsa_dnstr:
2243 tnode.add_connections_from_edges(dc_local)
2245 if opts.verify or do_dot_files:
2246 dot_edges = []
2247 dot_vertices = set()
2248 for v1 in graph_list:
2249 dot_vertices.add(v1.dsa_dnstr)
2250 for v2 in v1.edge_from:
2251 dot_edges.append((v2, v1.dsa_dnstr))
2252 dot_vertices.add(v2)
2254 verify_properties = ('connected', 'directed_double_ring_or_small')
2255 verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices,
2256 label='%s__%s__%s' % (site_local.site_dnstr,
2257 nctype_lut[nc_x.nc_type],
2258 nc_x.nc_dnstr),
2259 properties=verify_properties, debug=DEBUG,
2260 verify=opts.verify,
2261 dot_files=do_dot_files, directed=True)
2263 def intrasite(self):
2264 """The head method for generating the intra-site KCC replica
2265 connection graph and attendant nTDSConnection objects
2266 in the samdb
2267 """
2268 # Retrieve my DSA
2269 mydsa = self.my_dsa
2271 logger.debug("intrasite(): enter")
2273 # Test whether local site has topology disabled
2274 mysite = self.my_site
2275 if mysite.is_intrasite_topology_disabled():
2276 return
2278 detect_stale = (not mysite.is_detect_stale_disabled())
2279 for connect in mydsa.connect_table.values():
2280 if connect.to_be_added:
2281 DEBUG_CYAN("TO BE ADDED:\n%s" % connect)
2283 # Loop thru all the partitions, with gc_only False
2284 for partdn, part in self.part_table.items():
2285 self.construct_intrasite_graph(mysite, mydsa, part, False,
2286 detect_stale)
2287 for connect in mydsa.connect_table.values():
2288 if connect.to_be_added:
2289 DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
2291 # If the DC is a GC server, the KCC constructs an additional NC
2292 # replica graph (and creates nTDSConnection objects) for the
2293 # config NC as above, except that only NC replicas that "are present"
2294 # on GC servers are added to R.
2295 for connect in mydsa.connect_table.values():
2296 if connect.to_be_added:
2297 DEBUG_YELLOW("TO BE ADDED:\n%s" % connect)
2299 # Do it again, with gc_only True
2300 for partdn, part in self.part_table.items():
2301 if part.is_config():
2302 self.construct_intrasite_graph(mysite, mydsa, part, True,
2303 detect_stale)
2305 # The DC repeats the NC replica graph computation and nTDSConnection
2306 # creation for each of the NC replica graphs, this time assuming
2307 # that no DC has failed. It does so by re-executing the steps as
2308 # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
2309 # set in the options attribute of the site settings object for
2310 # the local DC's site. (i.e. we set the "detect_stale" flag to False)
2311 for connect in mydsa.connect_table.values():
2312 if connect.to_be_added:
2313 DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
2315 # Loop thru all the partitions.
2316 for partdn, part in self.part_table.items():
2317 self.construct_intrasite_graph(mysite, mydsa, part, False,
2318 False) # don't detect stale
2320 # If the DC is a GC server, the KCC constructs an additional NC
2321 # replica graph (and creates nTDSConnection objects) for the
2322 # config NC as above, except that only NC replicas that "are present"
2323 # on GC servers are added to R.
2324 for connect in mydsa.connect_table.values():
2325 if connect.to_be_added:
2326 DEBUG_RED("TO BE ADDED:\n%s" % connect)
2328 for partdn, part in self.part_table.items():
2329 if part.is_config():
2330 self.construct_intrasite_graph(mysite, mydsa, part, True,
2331 False) # don't detect stale
2333 if opts.readonly:
2334 # Display any connections to be deleted, modified or added
2335 for connect in mydsa.connect_table.values():
2336 if connect.to_be_deleted:
2337 logger.info("TO BE DELETED:\n%s" % connect)
2338 if connect.to_be_modified:
2339 logger.info("TO BE MODIFIED:\n%s" % connect)
2340 if connect.to_be_added:
2341 DEBUG_GREEN("TO BE ADDED:\n%s" % connect)
2343 mydsa.commit_connections(self.samdb, ro=True)
2344 else:
2345 # Commit any newly created connections to the samdb
2346 mydsa.commit_connections(self.samdb)
2348 def list_dsas(self):
2349 self.load_my_site()
2350 self.load_my_dsa()
2352 self.load_all_sites()
2353 self.load_all_partitions()
2354 self.load_all_transports()
2355 self.load_all_sitelinks()
2356 dsas = []
2357 for site in self.site_table.values():
2358 dsas.extend([dsa.dsa_dnstr.replace('CN=NTDS Settings,', '', 1)
2359 for dsa in site.dsa_table.values()])
2360 return dsas
2362 def load_samdb(self, dburl, lp, creds):
2363 self.samdb = SamDB(url=dburl,
2364 session_info=system_session(),
2365 credentials=creds, lp=lp)
2367 def plot_all_connections(self, basename, verify_properties=()):
2368 verify = verify_properties and opts.verify
2369 plot = opts.dot_files
2370 if not (verify or plot):
2371 return
2373 dot_edges = []
2374 dot_vertices = []
2375 edge_colours = []
2376 vertex_colours = []
2378 for dsa in self.dsa_by_dnstr.values():
2379 dot_vertices.append(dsa.dsa_dnstr)
2380 if dsa.is_ro():
2381 vertex_colours.append('#cc0000')
2382 else:
2383 vertex_colours.append('#0000cc')
2384 for con in dsa.connect_table.values():
2385 if con.is_rodc_topology():
2386 edge_colours.append('red')
2387 else:
2388 edge_colours.append('blue')
2389 dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))
2391 verify_and_dot(basename, dot_edges, vertices=dot_vertices,
2392 label=self.my_dsa_dnstr, properties=verify_properties,
2393 debug=DEBUG, verify=verify, dot_files=plot,
2394 directed=True, edge_colors=edge_colours,
2395 vertex_colors=vertex_colours)
2397 def run(self, dburl, lp, creds, forced_local_dsa=None,
2398 forget_local_links=False, forget_intersite_links=False):
2399 """Method to perform a complete run of the KCC and
2400 produce an updated topology for subsequent NC replica
2401 synchronization between domain controllers
2402 """
2403 # We may already have a samdb setup if we are
2404 # currently importing an ldif for a test run
2405 if self.samdb is None:
2406 try:
2407 self.load_samdb(dburl, lp, creds)
2408 except ldb.LdbError, (num, msg):
2409 logger.error("Unable to open sam database %s : %s" %
2410 (dburl, msg))
2411 return 1
2413 if forced_local_dsa:
2414 self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" %
2415 forced_local_dsa)
2417 try:
2418 # Setup
2419 self.load_my_site()
2420 self.load_my_dsa()
2422 self.load_all_sites()
2423 self.load_all_partitions()
2424 self.load_all_transports()
2425 self.load_all_sitelinks()
2427 if opts.verify or opts.dot_files:
2428 guid_to_dnstr = {}
2429 for site in self.site_table.values():
2430 guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
2431 for dnstr, dsa
2432 in site.dsa_table.items())
2434 self.plot_all_connections('dsa_initial')
2436 dot_edges = []
2437 current_reps, needed_reps = self.my_dsa.get_rep_tables()
2438 for dnstr, c_rep in current_reps.items():
2439 DEBUG("c_rep %s" % c_rep)
2440 dot_edges.append((self.my_dsa.dsa_dnstr, dnstr))
2442 verify_and_dot('dsa_repsFrom_initial', dot_edges,
2443 directed=True, label=self.my_dsa_dnstr,
2444 properties=(), debug=DEBUG, verify=opts.verify,
2445 dot_files=opts.dot_files)
2447 dot_edges = []
2448 for site in self.site_table.values():
2449 for dsa in site.dsa_table.values():
2450 current_reps, needed_reps = dsa.get_rep_tables()
2451 for dn_str, rep in current_reps.items():
2452 for reps_from in rep.rep_repsFrom:
2453 DEBUG("rep %s" % rep)
2454 dsa_guid = str(reps_from.source_dsa_obj_guid)
2455 dsa_dn = guid_to_dnstr[dsa_guid]
2456 dot_edges.append((dsa.dsa_dnstr, dsa_dn))
2458 verify_and_dot('dsa_repsFrom_initial_all', dot_edges,
2459 directed=True, label=self.my_dsa_dnstr,
2460 properties=(), debug=DEBUG, verify=opts.verify,
2461 dot_files=opts.dot_files)
2463 dot_edges = []
2464 for link in self.sitelink_table.values():
2465 for a, b in itertools.combinations(link.site_list, 2):
2466 dot_edges.append((str(a), str(b)))
2467 properties = ('connected',)
2468 verify_and_dot('dsa_sitelink_initial', dot_edges,
2469 directed=False,
2470 label=self.my_dsa_dnstr, properties=properties,
2471 debug=DEBUG, verify=opts.verify,
2472 dot_files=opts.dot_files)
2474 if forget_local_links:
2475 for dsa in self.my_site.dsa_table.values():
2476 dsa.connect_table = {k:v for k, v in dsa.connect_table.items()
2477 if v.is_rodc_topology()}
2478 self.plot_all_connections('dsa_forgotten_local')
2480 if forget_intersite_links:
2481 for site in self.site_table.values():
2482 for dsa in site.dsa_table.values():
2483 dsa.connect_table = {k:v for k, v in dsa.connect_table.items()
2484 if site is self.my_site and v.is_rodc_topology()}
2486 self.plot_all_connections('dsa_forgotten_all')
2487 # These are the published steps (in order) for the
2488 # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)
2490 # Step 1
2491 self.refresh_failed_links_connections()
2493 # Step 2
2494 self.intrasite()
2496 # Step 3
2497 all_connected = self.intersite()
2499 # Step 4
2500 self.remove_unneeded_ntdsconn(all_connected)
2502 # Step 5
2503 self.translate_ntdsconn()
2505 # Step 6
2506 self.remove_unneeded_failed_links_connections()
2508 # Step 7
2509 self.update_rodc_connection()
2511 if opts.verify or opts.dot_files:
2512 self.plot_all_connections('dsa_final',
2513 ('connected', 'forest_of_rings'))
2515 DEBUG_MAGENTA("there are %d dsa guids" % len(guid_to_dnstr))
2517 dot_edges = []
2518 edge_colors = []
2519 my_dnstr = self.my_dsa.dsa_dnstr
2520 current_reps, needed_reps = self.my_dsa.get_rep_tables()
2521 for dnstr, n_rep in needed_reps.items():
2522 for reps_from in n_rep.rep_repsFrom:
2523 guid_str = str(reps_from.source_dsa_obj_guid)
2524 dot_edges.append((my_dnstr, guid_to_dnstr[guid_str]))
2525 edge_colors.append('#' + str(n_rep.nc_guid)[:6])
2527 verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True,
2528 label=self.my_dsa_dnstr,
2529 properties=(), debug=DEBUG, verify=opts.verify,
2530 dot_files=opts.dot_files,
2531 edge_colors=edge_colors)
2533 dot_edges = []
2535 for site in self.site_table.values():
2536 for dsa in site.dsa_table.values():
2537 current_reps, needed_reps = dsa.get_rep_tables()
2538 for n_rep in needed_reps.values():
2539 for reps_from in n_rep.rep_repsFrom:
2540 dsa_guid = str(reps_from.source_dsa_obj_guid)
2541 dsa_dn = guid_to_dnstr[dsa_guid]
2542 dot_edges.append((dsa.dsa_dnstr, dsa_dn))
2544 verify_and_dot('dsa_repsFrom_final_all', dot_edges,
2545 directed=True, label=self.my_dsa_dnstr,
2546 properties=(), debug=DEBUG, verify=opts.verify,
2547 dot_files=opts.dot_files)
2549 except:
2550 raise
2552 return 0
2554 def import_ldif(self, dburl, lp, creds, ldif_file):
2555 """Import all objects and attributes that are relevent
2556 to the KCC algorithms from a previously exported LDIF file.
2558 The point of this function is to allow a programmer/debugger to
2559 import an LDIF file with non-security relevant information that
2560 was previously extracted from a DC database. The LDIF file is used
2561 to create a temporary abbreviated database. The KCC algorithm can
2562 then run against this abbreviated database for debug or test
2563 verification that the topology generated is computationally the
2564 same between different OSes and algorithms.
2566 :param dburl: path to the temporary abbreviated db to create
2567 :param ldif_file: path to the ldif file to import
2568 """
2569 try:
2570 self.samdb = ldif_utils.ldif_to_samdb(dburl, lp, creds, ldif_file,
2571 opts.forced_local_dsa)
2572 except ldif_utils.LdifError, e:
2573 print e
2574 return 1
2575 return 0
2577 def export_ldif(self, dburl, lp, creds, ldif_file):
2578 """Routine to extract all objects and attributes that are relevent
2579 to the KCC algorithms from a DC database.
2581 The point of this function is to allow a programmer/debugger to
2582 extract an LDIF file with non-security relevant information from
2583 a DC database. The LDIF file can then be used to "import" via
2584 the import_ldif() function this file into a temporary abbreviated
2585 database. The KCC algorithm can then run against this abbreviated
2586 database for debug or test verification that the topology generated
2587 is computationally the same between different OSes and algorithms.
2589 :param dburl: LDAP database URL to extract info from
2590 :param ldif_file: output LDIF file name to create
2591 """
2592 try:
2593 ldif_utils.samdb_to_ldif_file(self.samdb, dburl, lp, creds,
2594 ldif_file)
2595 except ldif_utils.LdifError, e:
2596 print e
2597 return 1
2598 return 0
2600 ##################################################
2601 # Global Functions
2602 ##################################################
2605 def get_spanning_tree_edges(graph, my_site, label=None):
2606 # Phase 1: Run Dijkstra's to get a list of internal edges, which are
2607 # just the shortest-paths connecting colored vertices
2609 internal_edges = set()
2611 for e_set in graph.edge_set:
2612 edgeType = None
2613 for v in graph.vertices:
2614 v.edges = []
2616 # All edges in an edge set share the same con_type
2617 for e in e_set.edges:
2618 edgeType = e.con_type
2619 for v in e.vertices:
2620 v.edges.append(e)
2622 if opts.verify or opts.dot_files:
2623 graph_edges = [(a.site.site_dnstr, b.site.site_dnstr)
2624 for a, b in
2625 itertools.chain(
2626 *(itertools.combinations(edge.vertices, 2)
2627 for edge in e_set.edges))]
2628 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2630 if opts.dot_files and opts.debug:
2631 write_dot_file('edgeset_%s' % (edgeType,), graph_edges,
2632 vertices=graph_nodes, label=label)
2634 if opts.verify:
2635 verify_graph('spanning tree edge set %s' % edgeType,
2636 graph_edges, vertices=graph_nodes,
2637 properties=('complete', 'connected'),
2638 debug=DEBUG)
2640 # Run dijkstra's algorithm with just the red vertices as seeds
2641 # Seed from the full replicas
2642 dijkstra(graph, edgeType, False)
2644 # Process edge set
2645 process_edge_set(graph, e_set, internal_edges)
2647 # Run dijkstra's algorithm with red and black vertices as the seeds
2648 # Seed from both full and partial replicas
2649 dijkstra(graph, edgeType, True)
2651 # Process edge set
2652 process_edge_set(graph, e_set, internal_edges)
2654 # All vertices have root/component as itself
2655 setup_vertices(graph)
2656 process_edge_set(graph, None, internal_edges)
2658 if opts.verify or opts.dot_files:
2659 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
2660 for e in internal_edges]
2661 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2662 verify_properties = ('multi_edge_forest',)
2663 verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label,
2664 properties=verify_properties, debug=DEBUG,
2665 verify=opts.verify,
2666 dot_files=opts.dot_files)
2668 # Phase 2: Run Kruskal's on the internal edges
2669 output_edges, components = kruskal(graph, internal_edges)
2671 # This recalculates the cost for the path connecting the
2672 # closest red vertex. Ignoring types is fine because NO
2673 # suboptimal edge should exist in the graph
2674 dijkstra(graph, "EDGE_TYPE_ALL", False) # TODO rename
2675 # Phase 3: Process the output
2676 for v in graph.vertices:
2677 if v.is_red():
2678 v.dist_to_red = 0
2679 else:
2680 v.dist_to_red = v.repl_info.cost
2682 if opts.verify or opts.dot_files:
2683 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
2684 for e in internal_edges]
2685 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2686 verify_properties = ('multi_edge_forest',)
2687 verify_and_dot('postkruskal', graph_edges, graph_nodes,
2688 label=label, properties=verify_properties,
2689 debug=DEBUG, verify=opts.verify,
2690 dot_files=opts.dot_files)
2692 # Ensure only one-way connections for partial-replicas,
2693 # and make sure they point the right way.
2694 edge_list = []
2695 for edge in output_edges:
2696 # We know these edges only have two endpoints because we made
2697 # them.
2698 v, w = edge.vertices
2699 if v.site is my_site or w.site is my_site:
2700 if (((v.is_black() or w.is_black()) and
2701 v.dist_to_red != MAX_DWORD)):
2702 edge.directed = True
2704 if w.dist_to_red < v.dist_to_red:
2705 edge.vertices[:] = w, v
2706 edge_list.append(edge)
2708 if opts.verify or opts.dot_files:
2709 graph_edges = [[x.site.site_dnstr for x in e.vertices]
2710 for e in edge_list]
2711 # Add the reverse edge if not directed.
2712 graph_edges.extend([x.site.site_dnstr
2713 for x in reversed(e.vertices)]
2714 for e in edge_list if not e.directed)
2715 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2716 verify_properties = ()
2717 verify_and_dot('post-one-way-partial', graph_edges, graph_nodes,
2718 label=label, properties=verify_properties,
2719 debug=DEBUG, verify=opts.verify,
2720 directed=True,
2721 dot_files=opts.dot_files)
2723 # count the components
2724 return edge_list, components
2727 def sort_replica_by_dsa_guid(rep1, rep2):
2728 return cmp(ndr_pack(rep1.rep_dsa_guid), ndr_pack(rep2.rep_dsa_guid))
2731 def sort_dsa_by_gc_and_guid(dsa1, dsa2):
2732 if dsa1.is_gc() and not dsa2.is_gc():
2733 return -1
2734 if not dsa1.is_gc() and dsa2.is_gc():
2735 return +1
2736 return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid))
2739 def is_smtp_replication_available():
2740 """Currently always returns false because Samba
2741 doesn't implement SMTP transfer for NC changes
2742 between DCs
2743 """
2744 return False
2747 def create_edge(con_type, site_link, guid_to_vertex):
2748 e = MultiEdge()
2749 e.site_link = site_link
2750 e.vertices = []
2751 for site_guid in site_link.site_list:
2752 if str(site_guid) in guid_to_vertex:
2753 e.vertices.extend(guid_to_vertex.get(str(site_guid)))
2754 e.repl_info.cost = site_link.cost
2755 e.repl_info.options = site_link.options
2756 e.repl_info.interval = site_link.interval
2757 e.repl_info.schedule = convert_schedule_to_repltimes(site_link.schedule)
2758 e.con_type = con_type
2759 e.directed = False
2760 return e
2763 def create_auto_edge_set(graph, transport):
2764 e_set = MultiEdgeSet()
2765 # use a NULL guid, not associated with a SiteLinkBridge object
2766 e_set.guid = misc.GUID()
2767 for site_link in graph.edges:
2768 if site_link.con_type == transport:
2769 e_set.edges.append(site_link)
2771 return e_set
2774 def create_edge_set(graph, transport, site_link_bridge):
2775 # TODO not implemented - need to store all site link bridges
2776 e_set = MultiEdgeSet()
2777 # e_set.guid = site_link_bridge
2778 return e_set
2781 def setup_vertices(graph):
2782 for v in graph.vertices:
2783 if v.is_white():
2784 v.repl_info.cost = MAX_DWORD
2785 v.root = None
2786 v.component_id = None
2787 else:
2788 v.repl_info.cost = 0
2789 v.root = v
2790 v.component_id = v
2792 v.repl_info.interval = 0
2793 v.repl_info.options = 0xFFFFFFFF
2794 v.repl_info.schedule = None # TODO highly suspicious
2795 v.demoted = False
2798 def dijkstra(graph, edge_type, include_black):
2799 queue = []
2800 setup_dijkstra(graph, edge_type, include_black, queue)
2801 while len(queue) > 0:
2802 cost, guid, vertex = heapq.heappop(queue)
2803 for edge in vertex.edges:
2804 for v in edge.vertices:
2805 if v is not vertex:
2806 # add new path from vertex to v
2807 try_new_path(graph, queue, vertex, edge, v)
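# Informational note: the queue entries pushed in setup_dijkstra() and
# try_new_path() below are (cost, guid, vertex) tuples, so heapq pops the
# cheapest vertex first and the GUID acts as a tie-breaker, keeping the
# vertex objects themselves out of the comparison.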
2810 def setup_dijkstra(graph, edge_type, include_black, queue):
2811 setup_vertices(graph)
2812 for vertex in graph.vertices:
2813 if vertex.is_white():
2814 continue
2816 if (((vertex.is_black() and not include_black)
2817 or edge_type not in vertex.accept_black
2818 or edge_type not in vertex.accept_red_red)):
2819 vertex.repl_info.cost = MAX_DWORD
2820 vertex.root = None # NULL GUID
2821 vertex.demoted = True # Demoted appears not to be used
2822 else:
2823 heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
2826 def try_new_path(graph, queue, vfrom, edge, vto):
2827 newRI = ReplInfo()
2828 # What this function checks is that there is a valid time frame for
2829 # which replication can actually occur, despite being adequately
2830 # connected
2831 intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI)
2833 # If the new path costs more than the current, then ignore the edge
2834 if newRI.cost > vto.repl_info.cost:
2835 return
2837 if newRI.cost < vto.repl_info.cost and not intersect:
2838 return
2840 new_duration = total_schedule(newRI.schedule)
2841 old_duration = total_schedule(vto.repl_info.schedule)
2843 # Cheaper or longer schedule
2844 if newRI.cost < vto.repl_info.cost or new_duration > old_duration:
2845 vto.root = vfrom.root
2846 vto.component_id = vfrom.component_id
2847 vto.repl_info = newRI
2848 heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
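# Worked example (hypothetical costs): if the best known path to vto
# costs 500 and the path through vfrom plus this edge costs 300 with a
# non-empty combined schedule, vto is re-rooted on vfrom's tree and
# re-queued; an equal-cost path only wins if its combined schedule
# covers more total time.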
2851 def check_demote_vertex(vertex, edge_type):
2852 if vertex.is_white():
2853 return
2855 # Accepts neither red-red nor black edges, demote
2856 if ((edge_type not in vertex.accept_black and
2857 edge_type not in vertex.accept_red_red)):
2858 vertex.repl_info.cost = MAX_DWORD
2859 vertex.root = None
2860 vertex.demoted = True # Demoted appears not to be used
2863 def undemote_vertex(vertex):
2864 if vertex.is_white():
2865 return
2867 vertex.repl_info.cost = 0
2868 vertex.root = vertex
2869 vertex.demoted = False
2872 def process_edge_set(graph, e_set, internal_edges):
2873 if e_set is None:
2874 for edge in graph.edges:
2875 for vertex in edge.vertices:
2876 check_demote_vertex(vertex, edge.con_type)
2877 process_edge(graph, edge, internal_edges)
2878 for vertex in edge.vertices:
2879 undemote_vertex(vertex)
2880 else:
2881 for edge in e_set.edges:
2882 process_edge(graph, edge, internal_edges)
2885 def process_edge(graph, examine, internal_edges):
2886 # Find the set of all vertices that touch the edge to examine
2887 vertices = []
2888 for v in examine.vertices:
2889 # Append a 4-tuple of color, repl cost, guid and vertex
2890 vertices.append((v.color, v.repl_info.cost, v.ndrpacked_guid, v))
2891 # Sort by color, then cost, then GUID; lowest first
2892 DEBUG("vertices is %s" % vertices)
2893 vertices.sort()
2895 color, cost, guid, bestv = vertices[0]
2896 # Add to internal edges an edge from every colored vertex to bestV
2897 for v in examine.vertices:
2898 if v.component_id is None or v.root is None:
2899 continue
2901 # Only add edge if valid inter-tree edge - needs a root and
2902 # different components
2903 if ((bestv.component_id is not None and
2904 bestv.root is not None and
2905 v.component_id is not None and
2906 v.root is not None and
2907 bestv.component_id != v.component_id)):
2908 add_int_edge(graph, internal_edges, examine, bestv, v)
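# Example (hypothetical vertices): if the edge being examined touches
# {X, Y, Z} and X sorts first on (color, cost, GUID), add_int_edge() is
# attempted for (X, Y) and (X, Z); each call links the *roots* of the
# two vertices, and only when the vertices lie in different components,
# giving Kruskal's phase genuine inter-tree candidates.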
2911 # Add an internal edge. The endpoints are the roots of the vertices passed
2912 # in and are always colored
2913 def add_int_edge(graph, internal_edges, examine, v1, v2):
2914 root1 = v1.root
2915 root2 = v2.root
2917 red_red = False
2918 if root1.is_red() and root2.is_red():
2919 red_red = True
2921 if red_red:
2922 if ((examine.con_type not in root1.accept_red_red
2923 or examine.con_type not in root2.accept_red_red)):
2924 return
2925 elif (examine.con_type not in root1.accept_black
2926 or examine.con_type not in root2.accept_black):
2927 return
2929 ri = ReplInfo()
2930 ri2 = ReplInfo()
2932 # Create the transitive replInfo for the two trees and this edge
2933 if not combine_repl_info(v1.repl_info, v2.repl_info, ri):
2934 return
2935 # ri is now initialized
2936 if not combine_repl_info(ri, examine.repl_info, ri2):
2937 return
2939 newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type,
2940 examine.site_link)
2941 # Order by vertex guid
2942 #XXX guid comparison using ndr_pack
2943 if newIntEdge.v1.ndrpacked_guid > newIntEdge.v2.ndrpacked_guid:
2944 newIntEdge.v1 = root2
2945 newIntEdge.v2 = root1
2947 internal_edges.add(newIntEdge)
2950 def kruskal(graph, edges):
2951 for v in graph.vertices:
2952 v.edges = []
2954 components = set([x for x in graph.vertices if not x.is_white()])
2955 edges = list(edges)
2957 # Sort based on the internal comparison function of InternalEdge
2958 edges.sort()
2960 expected_num_tree_edges = 0 # TODO this value makes little sense
2962 count_edges = 0
2963 output_edges = []
2964 index = 0
2965 while index < len(edges): # TODO and num_components > 1
2966 e = edges[index]
2967 parent1 = find_component(e.v1)
2968 parent2 = find_component(e.v2)
2969 if parent1 is not parent2:
2970 count_edges += 1
2971 add_out_edge(graph, output_edges, e)
2972 parent1.component_id = parent2
2973 components.discard(parent1)
2975 index += 1
2977 return output_edges, len(components)
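# Worked example (hypothetical, assuming the edges sort in this order):
# with colored vertices {A, B, C} and internal edges [A-B, B-C, A-C],
# A-B and B-C each merge two components and are emitted, A-C is skipped
# because its endpoints already share a root, and (output_edges, 1) is
# returned.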
2980 def find_component(vertex):
2981 if vertex.component_id is vertex:
2982 return vertex
2984 current = vertex
2985 while current.component_id is not current:
2986 current = current.component_id
2988 root = current
2989 current = vertex
2990 while current.component_id is not root:
2991 n = current.component_id
2992 current.component_id = root
2993 current = n
2995 return root
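# Sketch (hypothetical chain): with component links v -> a -> b -> c and
# c.component_id is c, find_component(v) returns c and repoints v and a
# straight at c (standard union-find path compression), so later lookups
# are nearly constant time.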
2998 def add_out_edge(graph, output_edges, e):
2999 v1 = e.v1
3000 v2 = e.v2
3002 # This multi-edge is a 'real' edge with no GUID
3003 ee = MultiEdge()
3004 ee.directed = False
3005 ee.site_link = e.site_link
3006 ee.vertices.append(v1)
3007 ee.vertices.append(v2)
3008 ee.con_type = e.e_type
3009 ee.repl_info = e.repl_info
3010 output_edges.append(ee)
3012 v1.edges.append(ee)
3013 v2.edges.append(ee)
3016 def test_all_reps_from(lp, creds, rng_seed=None):
3017 kcc = KCC()
3018 kcc.load_samdb(opts.dburl, lp, creds)
3019 dsas = kcc.list_dsas()
3020 needed_parts = {}
3021 current_parts = {}
3023 guid_to_dnstr = {}
3024 for site in kcc.site_table.values():
3025 guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
3026 for dnstr, dsa in site.dsa_table.items())
3028 dot_edges = []
3029 dot_vertices = []
3030 colours = []
3031 vertex_colours = []
3033 for dsa_dn in dsas:
3034 if rng_seed:
3035 random.seed(rng_seed)
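# Re-seeding here gives every simulated DSA's KCC run the same starting
# RNG state, so the random intra-site edge choices are reproducible and
# comparable across DSAs (see the --test-all-reps-from handling below).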
3036 kcc = KCC()
3037 kcc.run(opts.dburl, lp, creds, forced_local_dsa=dsa_dn,
3038 forget_local_links=opts.forget_local_links,
3039 forget_intersite_links=opts.forget_intersite_links)
3040 current, needed = kcc.my_dsa.get_rep_tables()
3042 for name, rep_table, rep_parts in (
3043 ('needed', needed, needed_parts),
3044 ('current', current, current_parts)):
3045 for part, nc_rep in rep_table.items():
3046 edges = rep_parts.setdefault(part, [])
3047 for reps_from in nc_rep.rep_repsFrom:
3048 source = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
3049 dest = guid_to_dnstr[str(nc_rep.rep_dsa_guid)]
3050 edges.append((source, dest))
3052 for site in kcc.site_table.values():
3053 for dsa in site.dsa_table.values():
3054 if dsa.is_ro():
3055 vertex_colours.append('#cc0000')
3056 else:
3057 vertex_colours.append('#0000cc')
3058 dot_vertices.append(dsa.dsa_dnstr)
3059 if dsa.connect_table:
3060 DEBUG_FN("DSA %s %s connections:\n%s" %
3061 (dsa.dsa_dnstr, len(dsa.connect_table),
3062 [x.from_dnstr for x in dsa.connect_table.values()]))
3063 for con in dsa.connect_table.values():
3064 if con.is_rodc_topology():
3065 colours.append('red')
3066 else:
3067 colours.append('blue')
3068 dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))
3070 verify_and_dot('all-dsa-connections', dot_edges, vertices=dot_vertices,
3071 label="all dsa NTDSConnections", properties=(),
3072 debug=DEBUG, verify=opts.verify, dot_files=opts.dot_files,
3073 directed=True, edge_colors=colours,
3074 vertex_colors=vertex_colours)
3076 for name, rep_parts in (('needed', needed_parts),
3077 ('current', current_parts)):
3078 for part, edges in rep_parts.items():
3079 verify_and_dot('repsFrom_%s_all_%s' % (name, part), edges,
3080 directed=True, label=part,
3081 properties=(), debug=DEBUG, verify=opts.verify,
3082 dot_files=opts.dot_files)
3085 logger = logging.getLogger("samba_kcc")
3086 logger.addHandler(logging.StreamHandler(sys.stdout))
3087 DEBUG = logger.debug
3090 def _color_debug(*args, **kwargs):
3091 DEBUG('%s%s%s' % (kwargs['color'], args[0], C_NORMAL), *args[1:])
3093 _globals = globals()
3094 for _color in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW',
3095 'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA',
3096 'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'):
3097 _globals['DEBUG_' + _color] = partial(_color_debug, color=_globals[_color])
3100 def DEBUG_FN(msg=''):
3101 import traceback
3102 filename, lineno, function, text = traceback.extract_stack(None, 2)[0]
3103 DEBUG("%s%s:%s%s %s%s()%s '%s'" % (CYAN, filename, BLUE, lineno,
3104 CYAN, function, C_NORMAL, msg))
3107 ##################################################
3108 # samba_kcc entry point
3109 ##################################################
3111 parser = optparse.OptionParser("samba_kcc [options]")
3112 sambaopts = options.SambaOptions(parser)
3113 credopts = options.CredentialsOptions(parser)
3115 parser.add_option_group(sambaopts)
3116 parser.add_option_group(credopts)
3117 parser.add_option_group(options.VersionOptions(parser))
3119 parser.add_option("--readonly", default=False,
3120 help="compute topology but do not update database",
3121 action="store_true")
3123 parser.add_option("--debug",
3124 help="debug output",
3125 action="store_true")
3127 parser.add_option("--verify",
3128 help="verify that assorted invariants are kept",
3129 action="store_true")
3131 parser.add_option("--list-verify-tests",
3132 help=("list what verification actions are available "
3133 "and do nothing else"),
3134 action="store_true")
3136 parser.add_option("--no-dot-files", dest='dot_files',
3137 help="Don't write dot graph files in /tmp",
3138 default=True, action="store_false")
3140 parser.add_option("--seed",
3141 help="random number seed",
3142 type=int)
3144 parser.add_option("--importldif",
3145 help="import topology ldif file",
3146 type=str, metavar="<file>")
3148 parser.add_option("--exportldif",
3149 help="export topology ldif file",
3150 type=str, metavar="<file>")
3152 parser.add_option("-H", "--URL",
3153 help="LDB URL for database or target server",
3154 type=str, metavar="<URL>", dest="dburl")
3156 parser.add_option("--tmpdb",
3157 help="schemaless database file to create for ldif import",
3158 type=str, metavar="<file>")
3160 parser.add_option("--now",
3161 help=("assume current time is this ('YYYYmmddHHMMSS[tz]',"
3162 " default: system time)"),
3163 type=str, metavar="<date>")
3165 parser.add_option("--forced-local-dsa",
3166 help="run calculations assuming the DSA is this DN",
3167 type=str, metavar="<DSA>")
3169 parser.add_option("--attempt-live-connections", default=False,
3170 help="Attempt to connect to other DSAs to test links",
3171 action="store_true")
3173 parser.add_option("--list-valid-dsas", default=False,
3174 help=("Print a list of DSA dnstrs that could be"
3175 " used in --forced-local-dsa"),
3176 action="store_true")
3178 parser.add_option("--test-all-reps-from", default=False,
3179 help="Create and verify a graph of reps-from for every DSA",
3180 action="store_true")
3182 parser.add_option("--forget-local-links", default=False,
3183 help="pretend not to know the existing local topology",
3184 action="store_true")
3186 parser.add_option("--forget-intersite-links", default=False,
3187 help="pretend not to know the existing intersite topology",
3188 action="store_true")
3191 opts, args = parser.parse_args()
3194 if opts.list_verify_tests:
3195 list_verify_tests()
3196 sys.exit(0)
3198 if opts.debug:
3199 logger.setLevel(logging.DEBUG)
3200 elif opts.readonly:
3201 logger.setLevel(logging.INFO)
3202 else:
3203 logger.setLevel(logging.WARNING)
3205 # initialize seed from optional input parameter
3206 if opts.seed:
3207 random.seed(opts.seed)
3208 else:
3209 random.seed(0xACE5CA11)
3211 if opts.now:
3212 for timeformat in ("%Y%m%d%H%M%S%Z", "%Y%m%d%H%M%S"):
3213 try:
3214 now_tuple = time.strptime(opts.now, timeformat)
3215 break
3216 except ValueError:
3217 pass
3218 else:
3219 # else happens if break doesn't --> no match
3220 print >> sys.stderr, "could not parse time '%s'" % opts.now
3221 sys.exit(1)
3223 unix_now = int(time.mktime(now_tuple))
3224 else:
3225 unix_now = int(time.time())
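# Example (illustrative): --now 20150101120000 parses via the plain
# "%Y%m%d%H%M%S" format above; a trailing timezone name such as GMT
# relies on the platform's strptime() handling of %Z.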
3227 nt_now = unix2nttime(unix_now)
3229 lp = sambaopts.get_loadparm()
3230 creds = credopts.get_credentials(lp, fallback_machine=True)
3232 if opts.dburl is None:
3233 opts.dburl = lp.samdb_url()
3235 if opts.test_all_reps_from:
3236 opts.readonly = True
3237 rng_seed = opts.seed or 0xACE5CA11
3238 test_all_reps_from(lp, creds, rng_seed=rng_seed)
3239 sys.exit()
3241 # Instantiate Knowledge Consistency Checker and perform run
3242 kcc = KCC()
3244 if opts.exportldif:
3245 rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)
3246 sys.exit(rc)
3248 if opts.importldif:
3249 if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
3250 logger.error("Specify a target temp database file with --tmpdb option")
3251 sys.exit(1)
3253 rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)
3254 if rc != 0:
3255 sys.exit(rc)
3257 if opts.list_valid_dsas:
3258 kcc.load_samdb(opts.dburl, lp, creds)
3259 print '\n'.join(kcc.list_dsas())
3260 sys.exit()
3262 try:
3263 rc = kcc.run(opts.dburl, lp, creds, opts.forced_local_dsa,
3264 opts.forget_local_links, opts.forget_intersite_links)
3265 sys.exit(rc)
3267 except GraphError, e:
3268 print e
3269 sys.exit(1)