#!/usr/bin/env python
#
# Compute our KCC topology
#
# Copyright (C) Dave Craft 2011
# Copyright (C) Andrew Bartlett 2015
#
# Andrew Bartlett's alleged work performed by his underlings Douglas
# Bagnall and Garming Sam.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import sys
import random
import uuid

# ensure we get messages out immediately, so they get in the samba logs,
# and don't get swallowed by a timeout
os.environ['PYTHONUNBUFFERED'] = '1'

# forcing GMT avoids a problem in some timezones with kerberos. Both MIT and
# heimdal can get mutual authentication errors due to the 24 second difference
# between UTC and GMT when using some zone files (eg. the PDT zone from
# the US)
os.environ["TZ"] = "GMT"

# Find right directory when running from source tree
sys.path.insert(0, "bin/python")

import optparse
import logging
import itertools
import heapq
import time
from functools import partial

from samba import (
    getopt as options,
    ldb,
    dsdb,
    drs_utils,
    nttime2unix)
from samba.auth import system_session
from samba.samdb import SamDB
from samba.dcerpc import drsuapi
from samba.kcc_utils import *
from samba.graph_utils import *
from samba import ldif_utils


class KCC(object):
    """The Knowledge Consistency Checker class.

    A container for objects and methods allowing a run of the KCC. Produces a
    set of connections in the samdb that the Distributed Replication
    Service can then utilize to replicate naming contexts.

    :param unix_now: The putative current time in seconds since 1970.
    :param read_only: Don't write to the database.
    :param verify: Check topological invariants for the generated graphs
    :param debug: Write verbosely to stderr.
    :param dot_files: write Graphviz files in /tmp showing topology
    """
    def __init__(self):
        """Initializes the partitions class which can hold
        our local DC's partitions or all the partitions in
        the forest
        """
        self.part_table = {}    # partition objects
        self.site_table = {}
        self.transport_table = {}
        self.ip_transport = None
        self.sitelink_table = {}
        self.dsa_by_dnstr = {}
        self.dsa_by_guid = {}

        self.get_dsa_by_guidstr = self.dsa_by_guid.get
        self.get_dsa = self.dsa_by_dnstr.get

        # TODO: These should be backed by a 'permanent' store so that when
        # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
        # the failure information can be returned
        self.kcc_failed_links = {}
        self.kcc_failed_connections = set()

        # Used in inter-site topology computation. A list
        # of connections (by NTDSConnection object) that are
        # to be kept when pruning un-needed NTDS Connections
        self.kept_connections = set()

        self.my_dsa_dnstr = None  # My dsa DN
        self.my_dsa = None  # My dsa object

        self.my_site_dnstr = None
        self.my_site = None

        self.samdb = None

    def load_all_transports(self):
        """Loads the inter-site transport objects for Sites

        :return: None
        :raise KCCError: if no IP transport is found
        """
        try:
            res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=interSiteTransport)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find inter-site transports - (%s)" %
                           estr)

        for msg in res:
            dnstr = str(msg.dn)

            transport = Transport(dnstr)

            transport.load_transport(self.samdb)
            self.transport_table.setdefault(str(transport.guid),
                                            transport)
            if transport.name == 'IP':
                self.ip_transport = transport

        if self.ip_transport is None:
            raise KCCError("there doesn't seem to be an IP transport")
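
    # For reference, in a stock forest the search above matches entries such
    # as "CN=IP,CN=Inter-Site Transports,CN=Sites,CN=Configuration,DC=..."
    # (the DC= components depend on the forest, shown here as an example);
    # the transport named 'IP' becomes self.ip_transport, the only transport
    # the rest of this script makes real use of.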

    def load_all_sitelinks(self):
        """Loads the inter-site siteLink objects

        :return: None
        :raise KCCError: if site-links aren't found
        """
        try:
            res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=siteLink)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find inter-site siteLinks - (%s)" % estr)

        for msg in res:
            dnstr = str(msg.dn)

            # already loaded
            if dnstr in self.sitelink_table:
                continue

            sitelink = SiteLink(dnstr)

            sitelink.load_sitelink(self.samdb)

            # Assign this siteLink to table
            # and index by dn
            self.sitelink_table[dnstr] = sitelink

    def load_site(self, dn_str):
        """Helper for load_my_site and load_all_sites.

        Put all the site's DSAs into the KCC indices.

        :param dn_str: a site dn_str
        :return: the Site object pertaining to the dn_str
        """
        site = Site(dn_str, unix_now)
        site.load_site(self.samdb)

        # We avoid replacing the site with an identical copy in case
        # somewhere else has a reference to the old one, which would
        # lead to all manner of confusion and chaos.
        guid = str(site.site_guid)
        if guid not in self.site_table:
            self.site_table[guid] = site
            self.dsa_by_dnstr.update(site.dsa_table)
            self.dsa_by_guid.update((str(x.dsa_guid), x)
                                    for x in site.dsa_table.values())

        return self.site_table[guid]

    def load_my_site(self):
        """Load the Site object for the local DSA.

        :return: None
        """
        self.my_site_dnstr = ("CN=%s,CN=Sites,%s" % (
            self.samdb.server_site_name(),
            self.samdb.get_config_basedn()))

        self.my_site = self.load_site(self.my_site_dnstr)

    def load_all_sites(self):
        """Discover all sites and create Site objects.

        :return: None
        :raise: KCCError if sites can't be found
        """
        try:
            res = self.samdb.search("CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=site)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find sites - (%s)" % estr)

        for msg in res:
            sitestr = str(msg.dn)
            self.load_site(sitestr)

    def load_my_dsa(self):
        """Discover my nTDSDSA dn thru the rootDSE entry

        :return: None
        :raise: KCCError if DSA can't be found
        """
        dn = ldb.Dn(self.samdb, "<GUID=%s>" % self.samdb.get_ntds_GUID())
        try:
            res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
                                    attrs=["objectGUID"])
        except ldb.LdbError, (enum, estr):
            logger.warning("Search for %s failed: %s. This typically happens"
                           " in --importldif mode due to lack of module"
                           " support.", dn, estr)
            try:
                # We work around the failure above by looking at the
                # dsServiceName that was put in the fake rootdse by
                # the --exportldif, rather than the
                # samdb.get_ntds_GUID(). The disadvantage is that this
                # mode requires we modify the @ROOTDSE dnq to support
                # --forced-local-dsa
                service_name_res = self.samdb.search(base="",
                                                     scope=ldb.SCOPE_BASE,
                                                     attrs=["dsServiceName"])
                dn = ldb.Dn(self.samdb,
                            service_name_res[0]["dsServiceName"][0])

                res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
                                        attrs=["objectGUID"])
            except ldb.LdbError, (enum, estr):
                raise KCCError("Unable to find my nTDSDSA - (%s)" % estr)

        if len(res) != 1:
            raise KCCError("Unable to find my nTDSDSA at %s" %
                           dn.extended_str())

        ntds_guid = misc.GUID(self.samdb.get_ntds_GUID())
        if misc.GUID(res[0]["objectGUID"][0]) != ntds_guid:
            raise KCCError("Did not find the GUID we expected,"
                           " perhaps due to --importldif")

        self.my_dsa_dnstr = str(res[0].dn)

        self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)

        if self.my_dsa_dnstr not in self.dsa_by_dnstr:
            DEBUG_DARK_YELLOW("my_dsa %s isn't in self.dsa_by_dnstr:"
                              " it must be RODC.\n"
                              "Let's add it, because my_dsa is special!\n"
                              "(likewise for self.dsa_by_guid of course)" %
                              self.my_dsa_dnstr)

            self.dsa_by_dnstr[self.my_dsa_dnstr] = self.my_dsa
            self.dsa_by_guid[str(self.my_dsa.dsa_guid)] = self.my_dsa

    def load_all_partitions(self):
        """Discover and load all partitions.

        Each NC is inserted into the part_table by partition
        dn string (not the nCName dn string)

        :return: None
        :raise: KCCError if partitions can't be found
        """
        try:
            res = self.samdb.search("CN=Partitions,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=crossRef)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find partitions - (%s)" % estr)

        for msg in res:
            partstr = str(msg.dn)

            # already loaded
            if partstr in self.part_table:
                continue

            part = Partition(partstr)

            part.load_partition(self.samdb)
            self.part_table[partstr] = part

    def should_be_present_test(self):
        """Enumerate all loaded partitions and DSAs in local
        site and test if NC should be present as replica
        """
        for partdn, part in self.part_table.items():
            for dsadn, dsa in self.my_site.dsa_table.items():
                needed, ro, partial = part.should_be_present(dsa)
                logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
                            (dsadn, part.nc_dnstr, needed, ro, partial))

    def refresh_failed_links_connections(self):
        """Based on MS-ADTS 6.2.2.1"""

        # Instead of NULL link with failure_count = 0, the tuple is
        # simply removed

        # LINKS: Refresh failed links
        self.kcc_failed_links = {}
        current, needed = self.my_dsa.get_rep_tables()
        for replica in current.values():
            # For every possible connection to replicate
            for reps_from in replica.rep_repsFrom:
                failure_count = reps_from.consecutive_sync_failures
                if failure_count <= 0:
                    continue

                dsa_guid = str(reps_from.source_dsa_obj_guid)
                time_first_failure = reps_from.last_success
                last_result = reps_from.last_attempt
                dns_name = reps_from.dns_name1

                f = self.kcc_failed_links.get(dsa_guid)
                if not f:
                    f = KCCFailedObject(dsa_guid, failure_count,
                                        time_first_failure, last_result,
                                        dns_name)
                    self.kcc_failed_links[dsa_guid] = f
                #elif f.failure_count == 0:
                #    f.failure_count = failure_count
                #    f.time_first_failure = time_first_failure
                #    f.last_result = last_result
                else:
                    f.failure_count = max(f.failure_count, failure_count)
                    f.time_first_failure = min(f.time_first_failure,
                                               time_first_failure)
                    f.last_result = last_result

        # CONNECTIONS: Refresh failed connections
        restore_connections = set()
        if opts.attempt_live_connections:
            DEBUG("refresh_failed_links: checking if links are still down")
            for connection in self.kcc_failed_connections:
                try:
                    drs_utils.drsuapi_connect(connection.dns_name, lp, creds)
                    # Failed connection is no longer failing
                    restore_connections.add(connection)
                except drs_utils.drsException:
                    # Failed connection still failing
                    connection.failure_count += 1
        else:
            DEBUG("refresh_failed_links: not checking live links because we\n"
                  "weren't asked to --attempt-live-connections")

        # Remove the restored connections from the failed connections
        self.kcc_failed_connections.difference_update(restore_connections)
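
    # Merge behaviour illustrated (values are made up): if two repsFrom
    # tuples reference the same source DSA GUID with failure counts 3 and 5
    # and first-failure times T1 < T2, the resulting KCCFailedObject ends up
    # with failure_count=5 and time_first_failure=T1, i.e. the maximum count
    # and the earliest failure, as computed by the max()/min() calls above.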

    def is_stale_link_connection(self, target_dsa):
        """Check whether a link to a remote DSA is stale

        Used in MS-ADTS 6.2.2.2 Intrasite Connection Creation

        Returns True if the remote seems to have been down for at
        least two hours, otherwise False.

        :param target_dsa: the remote DSA object
        :return: True if link is stale, otherwise False
        """
        failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
        if failed_link:
            # failure_count should be > 0, but check anyways
            if failed_link.failure_count > 0:
                unix_first_failure = \
                    nttime2unix(failed_link.time_first_failure)
                # TODO guard against future
                if unix_first_failure > unix_now:
                    logger.error("The last success time attribute for \
                                 repsFrom is in the future!")

                # Perform calculation in seconds
                if (unix_now - unix_first_failure) > 60 * 60 * 2:
                    return True

        # TODO connections

        return False
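
    # Worked example (illustrative numbers only): with unix_now equal to
    # 1,000,000,000 and a first failure converted to 999,990,000, the link
    # has been failing for 10,000 seconds, which exceeds 60 * 60 * 2 = 7200,
    # so it is reported as stale.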

    # TODO: This should be backed by some form of local database
    def remove_unneeded_failed_links_connections(self):
        # Remove all tuples in kcc_failed_links where failure count = 0
        # In this implementation, this should never happen.

        # Remove all connections which were not used this run or connections
        # that became active during this run.
        pass

    def remove_unneeded_ntdsconn(self, all_connected):
        """Remove unneeded NTDS Connections once topology is calculated

        Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections

        :param all_connected: indicates whether all sites are connected
        :return: None
        """
        mydsa = self.my_dsa

        # New connections won't have GUIDs which are needed for
        # sorting. Add them.
        for cn_conn in mydsa.connect_table.values():
            if cn_conn.guid is None:
                if opts.readonly:
                    cn_conn.guid = misc.GUID(str(uuid.uuid4()))
                    cn_conn.whenCreated = nt_now
                else:
                    cn_conn.load_connection(self.samdb)

        for cn_conn in mydsa.connect_table.values():

            s_dnstr = cn_conn.get_from_dnstr()
            if s_dnstr is None:
                cn_conn.to_be_deleted = True
                continue

            # Get the source DSA no matter what site
            # XXX s_dsa is NEVER USED. It will be removed.
            s_dsa = self.get_dsa(s_dnstr)

            #XXX should an RODC be regarded as same site
            same_site = s_dnstr in self.my_site.dsa_table

            # Given an nTDSConnection object cn, if the DC with the
            # nTDSDSA object dc that is the parent object of cn and
            # the DC with the nTDSDSA object referenced by cn!fromServer
            # are in the same site, the KCC on dc deletes cn if all of
            # the following are true:
            #
            # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
            #
            # No site settings object s exists for the local DC's site, or
            # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
            # s!options.
            #
            # Another nTDSConnection object cn2 exists such that cn and
            # cn2 have the same parent object, cn!fromServer = cn2!fromServer,
            # and either
            #
            #     cn!whenCreated < cn2!whenCreated
            #
            #     cn!whenCreated = cn2!whenCreated and
            #     cn!objectGUID < cn2!objectGUID
            #
            # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
            if same_site:
                if not cn_conn.is_generated():
                    continue

                if self.my_site.is_cleanup_ntdsconn_disabled():
                    continue

                # Loop thru connections looking for a duplicate that
                # fulfills the previous criteria
                lesser = False
                packed_guid = ndr_pack(cn_conn.guid)
                for cn2_conn in mydsa.connect_table.values():
                    if cn2_conn is cn_conn:
                        continue

                    s2_dnstr = cn2_conn.get_from_dnstr()

                    # If the NTDS Connections has a different
                    # fromServer field then no match
                    if s2_dnstr != s_dnstr:
                        continue

                    #XXX GUID comparison
                    lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or
                              (cn_conn.whenCreated == cn2_conn.whenCreated and
                               packed_guid < ndr_pack(cn2_conn.guid)))

                    if lesser:
                        break

                if lesser and not cn_conn.is_rodc_topology():
                    cn_conn.to_be_deleted = True

            # Given an nTDSConnection object cn, if the DC with the nTDSDSA
            # object dc that is the parent object of cn and the DC with
            # the nTDSDSA object referenced by cn!fromServer are in
            # different sites, a KCC acting as an ISTG in dc's site
            # deletes cn if all of the following are true:
            #
            #     Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
            #
            #     cn!fromServer references an nTDSDSA object for a DC
            #     in a site other than the local DC's site.
            #
            #     The keepConnections sequence returned by
            #     CreateIntersiteConnections() does not contain
            #     cn!objectGUID, or cn is "superseded by" (see below)
            #     another nTDSConnection cn2 and keepConnections
            #     contains cn2!objectGUID.
            #
            #     The return value of CreateIntersiteConnections()
            #     was true.
            #
            #     Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
            #     cn!options
            #
            else:  # different site

                if not mydsa.is_istg():
                    continue

                if not cn_conn.is_generated():
                    continue

                # TODO
                # We are directly using this connection in intersite or
                # we are using a connection which can supersede this one.
                #
                # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
                # appear to be correct.
                #
                # 1. cn!fromServer and cn!parent appear inconsistent with
                #    no cn2
                # 2. The repsFrom do not imply each other
                #
                if cn_conn in self.kept_connections:  # and not_superceded:
                    continue

                # This is the result of create_intersite_connections
                if not all_connected:
                    continue

                if not cn_conn.is_rodc_topology():
                    cn_conn.to_be_deleted = True

        if mydsa.is_ro() or opts.readonly:
            for connect in mydsa.connect_table.values():
                if connect.to_be_deleted:
                    DEBUG_FN("TO BE DELETED:\n%s" % connect)
                if connect.to_be_added:
                    DEBUG_FN("TO BE ADDED:\n%s" % connect)

            # Perform deletion from our tables but perform
            # no database modification
            mydsa.commit_connections(self.samdb, ro=True)
        else:
            # Commit any modified connections
            mydsa.commit_connections(self.samdb)
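
    # Duplicate tie-break illustrated (hypothetical values): two generated
    # connections cn and cn2 under the same parent share the same fromServer.
    # If cn.whenCreated is earlier, or the timestamps are equal and cn's
    # packed objectGUID sorts lower, then cn is the "lesser" one and is the
    # connection marked to_be_deleted above; cn2 survives.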

    def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
        """Update a repsFrom object if required.

        Part of MS-ADTS 6.2.2.5.

        Update t_repsFrom if necessary to satisfy requirements. Such
        updates are typically required when the IDL_DRSGetNCChanges
        server has moved from one site to another--for example, to
        enable compression when the server is moved from the
        client's site to another site.

        The repsFrom.update_flags bit field may be modified
        auto-magically if any changes are made here. See
        kcc_utils.RepsFromTo for gory details.


        :param n_rep: NC replica we need
        :param t_repsFrom: repsFrom tuple to modify
        :param s_rep: NC replica at source DSA
        :param s_dsa: source DSA
        :param cn_conn: Local DSA NTDSConnection child

        :return: None
        """
        s_dnstr = s_dsa.dsa_dnstr
        update = 0x0

        same_site = s_dnstr in self.my_site.dsa_table

        # if schedule doesn't match then update and modify
        times = convert_schedule_to_repltimes(cn_conn.schedule)
        if times != t_repsFrom.schedule:
            t_repsFrom.schedule = times
            update |= drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE

        # Bit DRS_PER_SYNC is set in replicaFlags if and only
        # if nTDSConnection schedule has a value v that specifies
        # scheduled replication is to be performed at least once
        # per week.
        if cn_conn.is_schedule_minimum_once_per_week():

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC

        # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
        # if the source DSA and the local DC's nTDSDSA object are
        # in the same site or source dsa is the FSMO role owner
        # of one or more FSMO roles in the NC replica.
        if same_site or n_rep.is_fsmo_role_owner(s_dnstr):

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC

        # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
        # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
        # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
        # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
        # t.replicaFlags if and only if s and the local DC's
        # nTDSDSA object are in different sites.
        if ((cn_conn.options &
             dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0):

            if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:
                # XXX WARNING
                #
                # it LOOKS as if this next test is a bit silly: it
                # checks the flag then sets it if it not set; the same
                # effect could be achieved by unconditionally setting
                # it. But in fact the repsFrom object has special
                # magic attached to it, and altering replica_flags has
                # side-effects. That is bad in my opinion, but there
                # you go.
                if ((t_repsFrom.replica_flags &
                     drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
                    t_repsFrom.replica_flags |= \
                        drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

        elif not same_site:

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

        # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
        # and only if s and the local DC's nTDSDSA object are
        # not in the same site and the
        # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
        # clear in cn!options
        if (not same_site and
            (cn_conn.options &
             dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION

        # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
        # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
        if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC

        # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
        # set in t.replicaFlags if and only if cn!enabledConnection = false.
        if not cn_conn.is_enabled():

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0):
                t_repsFrom.replica_flags |= \
                    drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0):
                t_repsFrom.replica_flags |= \
                    drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC

        # If s and the local DC's nTDSDSA object are in the same site,
        # cn!transportType has no value, or the RDN of cn!transportType
        # is CN=IP:
        #
        #     Bit DRS_MAIL_REP in t.replicaFlags is clear.
        #
        #     t.uuidTransport = NULL GUID.
        #
        #     t.uuidDsa = The GUID-based DNS name of s.
        #
        # Otherwise:
        #
        #     Bit DRS_MAIL_REP in t.replicaFlags is set.
        #
        #     If x is the object with dsname cn!transportType,
        #     t.uuidTransport = x!objectGUID.
        #
        #     Let a be the attribute identified by
        #     x!transportAddressAttribute. If a is
        #     the dNSHostName attribute, t.uuidDsa = the GUID-based
        #     DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
        #
        # It appears that the first statement i.e.
        #
        #     "If s and the local DC's nTDSDSA object are in the same
        #      site, cn!transportType has no value, or the RDN of
        #      cn!transportType is CN=IP:"
        #
        # could be a slightly tighter statement if it had an "or"
        # between each condition. I believe this should
        # be interpreted as:
        #
        #     IF (same-site) OR (no-value) OR (type-ip)
        #
        # because IP should be the primary transport mechanism
        # (even in inter-site) and the absence of the transportType
        # attribute should always imply IP no matter if it's multi-site
        #
        # NOTE MS-TECH INCORRECT:
        #
        # All indications point to these statements above being
        # incorrectly stated:
        #
        #     t.uuidDsa = The GUID-based DNS name of s.
        #
        #     Let a be the attribute identified by
        #     x!transportAddressAttribute. If a is
        #     the dNSHostName attribute, t.uuidDsa = the GUID-based
        #     DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
        #
        # because the uuidDSA is a GUID and not a GUID-based DNS
        # name. Nor can uuidDsa hold (s!parent)!a if not
        # dNSHostName. What should have been said is:
        #
        #     t.naDsa = The GUID-based DNS name of s
        #
        # That would also be correct if transportAddressAttribute
        # were "mailAddress" because (naDsa) can also correctly
        # hold the SMTP ISM service address.
        #
        nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())

        # We're not currently supporting SMTP replication
        # so is_smtp_replication_available() is currently
        # always returning False
        if ((same_site or
             cn_conn.transport_dnstr is None or
             cn_conn.transport_dnstr.find("CN=IP") == 0 or
             not is_smtp_replication_available())):

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0):
                t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP

            t_repsFrom.transport_guid = misc.GUID()

            # See (NOTE MS-TECH INCORRECT) above
            if t_repsFrom.version == 0x1:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name1 != nastr:
                    t_repsFrom.dns_name1 = nastr
            else:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name2 is None or \
                   t_repsFrom.dns_name1 != nastr or \
                   t_repsFrom.dns_name2 != nastr:
                    t_repsFrom.dns_name1 = nastr
                    t_repsFrom.dns_name2 = nastr

        else:
            # XXX This entire branch is NEVER used! Because we don't do SMTP!
            # (see the if condition above). Just close your eyes here.
            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP

            # We have a transport type but it's not an
            # object in the database
            if cn_conn.transport_guid not in self.transport_table:
                raise KCCError("Missing inter-site transport - (%s)" %
                               cn_conn.transport_dnstr)

            x_transport = self.transport_table[str(cn_conn.transport_guid)]

            if t_repsFrom.transport_guid != x_transport.guid:
                t_repsFrom.transport_guid = x_transport.guid

            # See (NOTE MS-TECH INCORRECT) above
            if x_transport.address_attr == "dNSHostName":

                if t_repsFrom.version == 0x1:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name1 != nastr:
                        t_repsFrom.dns_name1 = nastr
                else:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name2 is None or \
                       t_repsFrom.dns_name1 != nastr or \
                       t_repsFrom.dns_name2 != nastr:
                        t_repsFrom.dns_name1 = nastr
                        t_repsFrom.dns_name2 = nastr

            else:
                # MS tech specification says we retrieve the named
                # attribute in "transportAddressAttribute" from the parent of
                # the DSA object
                try:
                    pdnstr = s_dsa.get_parent_dnstr()
                    attrs = [x_transport.address_attr]

                    res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
                                            attrs=attrs)
                except ldb.LdbError, (enum, estr):
                    raise KCCError(
                        "Unable to find attr (%s) for (%s) - (%s)" %
                        (x_transport.address_attr, pdnstr, estr))

                msg = res[0]
                nastr = str(msg[x_transport.address_attr][0])

                # See (NOTE MS-TECH INCORRECT) above
                if t_repsFrom.version == 0x1:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name1 != nastr:
                        t_repsFrom.dns_name1 = nastr
                else:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name2 is None or \
                       t_repsFrom.dns_name1 != nastr or \
                       t_repsFrom.dns_name2 != nastr:

                        t_repsFrom.dns_name1 = nastr
                        t_repsFrom.dns_name2 = nastr

        if t_repsFrom.is_modified():
            logger.debug("modify_repsFrom(): %s" % t_repsFrom)
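
    # Note on the repeated "check the flag, then set it" pattern above: as
    # the XXX WARNING comment explains, assigning to t_repsFrom.replica_flags
    # marks the tuple as modified (see kcc_utils.RepsFromTo), so each flag is
    # only ORed in when it is currently clear, keeping is_modified() accurate
    # when nothing has actually changed.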

    def is_repsFrom_implied(self, n_rep, cn_conn):
        """Given an NC replica and NTDS Connection, determine if the connection
        implies a repsFrom tuple should be present from the source DSA listed
        in the connection to the naming context

        :param n_rep: NC replica
        :param conn: NTDS Connection
        :return: (True or False), source DSA
        """
        #XXX different conditions for "implies" than MS-ADTS 6.2.2

        # NTDS Connection must satisfy all the following criteria
        # to imply a repsFrom tuple is needed:
        #
        #    cn!enabledConnection = true.
        #    cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
        #    cn!fromServer references an nTDSDSA object.

        s_dsa = None

        if cn_conn.is_enabled() and not cn_conn.is_rodc_topology():
            s_dnstr = cn_conn.get_from_dnstr()
            if s_dnstr is not None:
                s_dsa = self.get_dsa(s_dnstr)

        # No DSA matching this source DN string?
        if s_dsa is None:
            return False, None

        # To imply a repsFrom tuple is needed, each of these
        # must be True:
        #
        #     An NC replica of the NC "is present" on the DC to
        #     which the nTDSDSA object referenced by cn!fromServer
        #     corresponds.
        #
        #     An NC replica of the NC "should be present" on
        #     the local DC
        s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

        if s_rep is None or not s_rep.is_present():
            return False, None

        # To imply a repsFrom tuple is needed, each of these
        # must be True:
        #
        #     The NC replica on the DC referenced by cn!fromServer is
        #     a writable replica or the NC replica that "should be
        #     present" on the local DC is a partial replica.
        #
        #     The NC is not a domain NC, the NC replica that
        #     "should be present" on the local DC is a partial
        #     replica, cn!transportType has no value, or
        #     cn!transportType has an RDN of CN=IP.
        implied = (not s_rep.is_ro() or n_rep.is_partial()) and \
                  (not n_rep.is_domain() or
                   n_rep.is_partial() or
                   cn_conn.transport_dnstr is None or
                   cn_conn.transport_dnstr.find("CN=IP") == 0)

        if implied:
            return True, s_dsa
        else:
            return False, None

    def translate_ntdsconn(self, current_dsa=None):
        """Adjust repsFrom to match NTDSConnections

        This function adjusts values of repsFrom abstract attributes of NC
        replicas on the local DC to match those implied by
        nTDSConnection objects.

        Based on [MS-ADTS] 6.2.2.5

        :param current_dsa: optional DSA on whose behalf we are acting.
        :return: None
        """
        count = 0

        if current_dsa is None:
            current_dsa = self.my_dsa

        if current_dsa.is_translate_ntdsconn_disabled():
            logger.debug("skipping translate_ntdsconn() "
                         "because disabling flag is set")
            return

        logger.debug("translate_ntdsconn(): enter")

        current_rep_table, needed_rep_table = current_dsa.get_rep_tables()

        # Filled in with replicas we currently have that need deleting
        delete_reps = set()

        # We're using the MS notation names here to allow
        # correlation back to the published algorithm.
        #
        # n_rep      - NC replica (n)
        # t_repsFrom - tuple (t) in n!repsFrom
        # s_dsa      - Source DSA of the replica. Defined as nTDSDSA
        #              object (s) such that (s!objectGUID = t.uuidDsa)
        #              In our IDL representation of repsFrom the (uuidDsa)
        #              attribute is called (source_dsa_obj_guid)
        # cn_conn    - (cn) is nTDSConnection object and child of the local
        #              DC's nTDSDSA object and (cn!fromServer = s)
        # s_rep      - source DSA replica of n
        #
        # If we have the replica and it's not needed
        # then we add it to the "to be deleted" list.
        for dnstr in current_rep_table:
            if dnstr not in needed_rep_table:
                delete_reps.add(dnstr)

        DEBUG_FN('current %d needed %d delete %d' % (len(current_rep_table),
                 len(needed_rep_table), len(delete_reps)))

        if delete_reps:
            DEBUG('deleting these reps: %s' % delete_reps)
            for dnstr in delete_reps:
                del current_rep_table[dnstr]

        # Now perform the scan of replicas we'll need
        # and compare any current repsFrom against the
        # connections
        for n_rep in needed_rep_table.values():

            # load any repsFrom and fsmo roles as we'll
            # need them during connection translation
            n_rep.load_repsFrom(self.samdb)
            n_rep.load_fsmo_roles(self.samdb)

            # Loop thru the existing repsFrom tuples (if any)
            # XXX This is a list and could contain duplicates
            #     (multiple load_repsFrom calls)
            for t_repsFrom in n_rep.rep_repsFrom:

                # for each tuple t in n!repsFrom, let s be the nTDSDSA
                # object such that s!objectGUID = t.uuidDsa
                guidstr = str(t_repsFrom.source_dsa_obj_guid)
                s_dsa = self.get_dsa_by_guidstr(guidstr)

                # Source dsa is gone from config (strange)
                # so cleanup stale repsFrom for unlisted DSA
                if s_dsa is None:
                    logger.warning("repsFrom source DSA guid (%s) not found" %
                                   guidstr)
                    t_repsFrom.to_be_deleted = True
                    continue

                s_dnstr = s_dsa.dsa_dnstr

                # Retrieve my DSA's connection object (if it exists)
                # that specifies the fromServer equivalent to
                # the DSA that is specified in the repsFrom source
                connections = current_dsa.get_connection_by_from_dnstr(s_dnstr)

                count = 0
                cn_conn = None

                for con in connections:
                    if con.is_rodc_topology():
                        continue
                    cn_conn = con

                # Let (cn) be the nTDSConnection object such that (cn)
                # is a child of the local DC's nTDSDSA object and
                # (cn!fromServer = s) and (cn!options) does not contain
                # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.

                # KCC removes this repsFrom tuple if any of the following
                # is true:
                #     cn = NULL.
                #     [...]

                #XXX varying possible interpretations of rodc_topology
                if cn_conn is None:
                    t_repsFrom.to_be_deleted = True
                    continue

                # [...] KCC removes this repsFrom tuple if:
                #
                #     No NC replica of the NC "is present" on DSA that
                #     would be source of replica
                #
                #     A writable replica of the NC "should be present" on
                #     the local DC, but a partial replica "is present" on
                #     the source DSA
                s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

                if s_rep is None or not s_rep.is_present() or \
                   (not n_rep.is_ro() and s_rep.is_partial()):

                    t_repsFrom.to_be_deleted = True
                    continue

                # If the KCC did not remove t from n!repsFrom, it updates t
                self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

            # Loop thru connections and add implied repsFrom tuples
            # for each NTDSConnection under our local DSA if the
            # repsFrom is not already present
            for cn_conn in current_dsa.connect_table.values():

                implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn)
                if not implied:
                    continue

                # Loop thru the existing repsFrom tuples (if any) and
                # if we already have a tuple for this connection then
                # no need to proceed to add. It will have been changed
                # to have the correct attributes above
                for t_repsFrom in n_rep.rep_repsFrom:
                    guidstr = str(t_repsFrom.source_dsa_obj_guid)
                    #XXX what?
                    if s_dsa is self.get_dsa_by_guidstr(guidstr):
                        s_dsa = None
                        break

                if s_dsa is None:
                    continue

                # Create a new RepsFromTo and proceed to modify
                # it according to specification
                t_repsFrom = RepsFromTo(n_rep.nc_dnstr)

                t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid

                s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

                self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

                # Add to our NC repsFrom as this is newly computed
                if t_repsFrom.is_modified():
                    n_rep.rep_repsFrom.append(t_repsFrom)

            if opts.readonly:
                # Display any to be deleted or modified repsFrom
                text = n_rep.dumpstr_to_be_deleted()
                if text:
                    logger.info("TO BE DELETED:\n%s" % text)
                text = n_rep.dumpstr_to_be_modified()
                if text:
                    logger.info("TO BE MODIFIED:\n%s" % text)

                # Perform deletion from our tables but perform
                # no database modification
                n_rep.commit_repsFrom(self.samdb, ro=True)
            else:
                # Commit any modified repsFrom to the NC replica
                n_rep.commit_repsFrom(self.samdb)

    def merge_failed_links(self):
        """Merge of kCCFailedLinks and kCCFailedConnections from bridgeheads.

        The KCC on a writable DC attempts to merge the link and connection
        failure information from bridgehead DCs in its own site to help it
        identify failed bridgehead DCs.

        Based on MS-ADTS 6.2.2.3.2 "Merge of kCCFailedLinks and
        kCCFailedConnections from Bridgeheads"

        :param ping: An oracle of current bridgehead availability
        :return: None
        """
        # 1. Queries every bridgehead server in your site (other than yourself)
        # 2. For every ntDSConnection that references a server in a different
        #    site merge all the failure info
        #
        # XXX - not implemented yet
        if opts.attempt_live_connections:
            DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")
        else:
            DEBUG_FN("skipping merge_failed_links() because it requires "
                     "real network connections\n"
                     "and we weren't asked to --attempt-live-connections")

    def setup_graph(self, part):
        """Set up an intersite graph

        An intersite graph has a Vertex for each site object, a
        MultiEdge for each SiteLink object, and a MultiEdgeSet for
        each siteLinkBridge object (or implied siteLinkBridge). It
        reflects the intersite topology in a slightly more abstract
        graph form.

        Roughly corresponds to MS-ADTS 6.2.2.3.4.3

        :param part: a Partition object
        :returns: an InterSiteGraph object
        """
        guid_to_vertex = {}
        # Create graph
        g = IntersiteGraph()
        # Add vertices
        for site_guid, site in self.site_table.items():
            vertex = Vertex(site, part)
            vertex.guid = site_guid
            vertex.ndrpacked_guid = ndr_pack(site.site_guid)
            g.vertices.add(vertex)

            if not guid_to_vertex.get(site_guid):
                guid_to_vertex[site_guid] = []

            guid_to_vertex[site_guid].append(vertex)

        connected_vertices = set()
        for transport_guid, transport in self.transport_table.items():
            # Currently only ever "IP"
            if transport.name != 'IP':
                DEBUG_FN("setup_graph is ignoring transport %s" %
                         transport.name)
                continue
            for site_link_dn, site_link in self.sitelink_table.items():
                new_edge = create_edge(transport_guid, site_link,
                                       guid_to_vertex)
                connected_vertices.update(new_edge.vertices)
                g.edges.add(new_edge)

            # If 'Bridge all site links' is enabled and Win2k3 bridges required
            # is not set
            # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
            # No documentation for this however, ntdsapi.h appears to have:
            # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
            if (((self.my_site.site_options & 0x00000002) == 0
                 and (self.my_site.site_options & 0x00001000) == 0)):
                g.edge_set.add(create_auto_edge_set(g, transport_guid))
            else:
                # TODO get all site link bridges
                for site_link_bridge in []:
                    g.edge_set.add(create_edge_set(g, transport_guid,
                                                   site_link_bridge))

        g.connected_vertices = connected_vertices

        #be less verbose in dot file output unless --debug
        do_dot_files = opts.dot_files and opts.debug
        dot_edges = []
        for edge in g.edges:
            for a, b in itertools.combinations(edge.vertices, 2):
                dot_edges.append((a.site.site_dnstr, b.site.site_dnstr))
        verify_properties = ()
        verify_and_dot('site_edges', dot_edges, directed=False,
                       label=self.my_dsa_dnstr,
                       properties=verify_properties, debug=DEBUG,
                       verify=opts.verify,
                       dot_files=do_dot_files)

        return g
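
    # Toy example of the resulting structure (hypothetical two-site forest):
    # sites A and B joined by a single siteLink over the IP transport give a
    # graph with two Vertex objects, one MultiEdge covering {A, B}, and
    # (when site link bridging is enabled, as tested above) a single
    # automatic edge set from create_auto_edge_set(); both vertices end up
    # in g.connected_vertices.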

    def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
        """Get a bridgehead DC for a site.

        Part of MS-ADTS 6.2.2.3.4.4

        :param site: site object representing the site for which a
            bridgehead DC is desired.
        :param part: crossRef for NC to replicate.
        :param transport: interSiteTransport object for replication
            traffic.
        :param partial_ok: True if a DC containing a partial
            replica or a full replica will suffice, False if only
            a full replica will suffice.
        :param detect_failed: True to detect failed DCs and route
            replication traffic around them, False to assume no DC
            has failed.
        :return: dsa object for the bridgehead DC or None
        """

        bhs = self.get_all_bridgeheads(site, part, transport,
                                       partial_ok, detect_failed)
        if len(bhs) == 0:
            DEBUG_MAGENTA("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" %
                          site.site_dnstr)
            return None
        else:
            DEBUG_GREEN("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" %
                        (site.site_dnstr, bhs[0].dsa_dnstr))
            return bhs[0]

    def get_all_bridgeheads(self, site, part, transport,
                            partial_ok, detect_failed):
        """Get all bridgehead DCs on a site satisfying the given criteria

        Part of MS-ADTS 6.2.2.3.4.4

        :param site: site object representing the site for which
            bridgehead DCs are desired.
        :param part: partition for NC to replicate.
        :param transport: interSiteTransport object for
            replication traffic.
        :param partial_ok: True if a DC containing a partial
            replica or a full replica will suffice, False if
            only a full replica will suffice.
        :param detect_failed: True to detect failed DCs and route
            replication traffic around them, FALSE to assume
            no DC has failed.
        :return: list of dsa object for available bridgehead DCs
        """

        bhs = []

        logger.debug("get_all_bridgeheads: %s" % transport.name)
        if 'Site-5' in site.site_dnstr:
            DEBUG_RED("get_all_bridgeheads with %s, part%s, partial_ok %s"
                      " detect_failed %s" % (site.site_dnstr, part.partstr,
                                             partial_ok, detect_failed))
        logger.debug(site.rw_dsa_table)
        for dsa in site.rw_dsa_table.values():

            pdnstr = dsa.get_parent_dnstr()

            # IF t!bridgeheadServerListBL has one or more values and
            # t!bridgeheadServerListBL does not contain a reference
            # to the parent object of dc then skip dc
            if ((len(transport.bridgehead_list) != 0 and
                 pdnstr not in transport.bridgehead_list)):
                continue

            # IF dc is in the same site as the local DC
            #    IF a replica of cr!nCName is not in the set of NC replicas
            #    that "should be present" on dc or a partial replica of the
            #    NC "should be present" but partialReplicasOkay = FALSE
            #        Skip dc
            if self.my_site.same_site(dsa):
                needed, ro, partial = part.should_be_present(dsa)
                if not needed or (partial and not partial_ok):
                    continue
                rep = dsa.get_current_replica(part.nc_dnstr)

            # ELSE
            #    IF an NC replica of cr!nCName is not in the set of NC
            #    replicas that "are present" on dc or a partial replica of
            #    the NC "is present" but partialReplicasOkay = FALSE
            #        Skip dc
            else:
                rep = dsa.get_current_replica(part.nc_dnstr)
                if rep is None or (rep.is_partial() and not partial_ok):
                    continue

            # IF AmIRODC() and cr!nCName corresponds to default NC then
            #     Let dsaobj be the nTDSDSA object of the dc
            #     IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
            #         Skip dc
            if self.my_dsa.is_ro() and rep is not None and rep.is_default():
                if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
                    continue

            # IF t!name != "IP" and the parent object of dc has no value for
            # the attribute specified by t!transportAddressAttribute
            #     Skip dc
            if transport.name != "IP":
                # MS tech specification says we retrieve the named
                # attribute in "transportAddressAttribute" from the parent
                # of the DSA object
                try:
                    attrs = [transport.address_attr]

                    res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
                                            attrs=attrs)
                except ldb.LdbError, (enum, estr):
                    continue

                msg = res[0]
                if transport.address_attr not in msg:
                    continue
                #XXX nastr is NEVER USED. It will be removed.
                nastr = str(msg[transport.address_attr][0])

            # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
            #     Skip dc
            if self.is_bridgehead_failed(dsa, detect_failed):
                DEBUG("bridgehead is failed")
                continue

            logger.debug("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr)
            bhs.append(dsa)

        # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
        # s!options
        #    SORT bhs such that all GC servers precede DCs that are not GC
        #    servers, and otherwise by ascending objectGUID
        # ELSE
        #    SORT bhs in a random order
        if site.is_random_bridgehead_disabled():
            bhs.sort(sort_dsa_by_gc_and_guid)
        else:
            random.shuffle(bhs)
        DEBUG_YELLOW(bhs)
        return bhs

    def is_bridgehead_failed(self, dsa, detect_failed):
        """Determine whether a given DC is known to be in a failed state

        :param dsa: the bridgehead to test
        :param detect_failed: True to really check, False to assume no failure
        :return: True if and only if the DC should be considered failed

        Here we DEPART from the pseudo code spec which appears to be
        wrong. It says, in full:

    /***** BridgeheadDCFailed *****/
    /* Determine whether a given DC is known to be in a failed state.
     * IN: objectGUID - objectGUID of the DC's nTDSDSA object.
     * IN: detectFailedDCs - TRUE if and only failed DC detection is
     *     enabled.
     * RETURNS: TRUE if and only if the DC should be considered to be in a
     *     failed state.
     */
    BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool
    {
        IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in
        the options attribute of the site settings object for the local
        DC's site
            RETURN FALSE
        ELSEIF a tuple z exists in the kCCFailedLinks or
        kCCFailedConnections variables such that z.UUIDDsa =
        objectGUID, z.FailureCount > 1, and the current time -
        z.TimeFirstFailure > 2 hours
            RETURN TRUE
        ELSE
            RETURN detectFailedDCs
        ENDIF
    }

        where you will see detectFailedDCs is not behaving as
        advertised -- it is acting as a default return code in the
        event that a failure is not detected, not a switch turning
        detection on or off. Elsewhere the documentation seems to
        concur with the comment rather than the code.
        """
        if not detect_failed:
            return False

        # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
        # When DETECT_STALE_DISABLED, we can never know if
        # it's in a failed state
        if self.my_site.site_options & 0x00000008:
            return False

        return self.is_stale_link_connection(dsa)
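
    # Decision summary for is_bridgehead_failed(), restating the code above:
    #   detect_failed is False                              -> False
    #   site option 0x00000008 (DETECT_STALE_DISABLED) set  -> False
    #   otherwise -> whatever is_stale_link_connection(dsa) reports
    #                (i.e. down for more than two hours).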

    def create_connection(self, part, rbh, rsite, transport,
                          lbh, lsite, link_opt, link_sched,
                          partial_ok, detect_failed):
        """Create an nTDSConnection object with the given parameters
        if one does not already exist.

        :param part: crossRef object for the NC to replicate.
        :param rbh: nTDSDSA object for DC to act as the
            IDL_DRSGetNCChanges server (which is in a site other
            than the local DC's site).
        :param rsite: site of the rbh
        :param transport: interSiteTransport object for the transport
            to use for replication traffic.
        :param lbh: nTDSDSA object for DC to act as the
            IDL_DRSGetNCChanges client (which is in the local DC's site).
        :param lsite: site of the lbh
        :param link_opt: Replication parameters (aggregated siteLink options,
            etc.)
        :param link_sched: Schedule specifying the times at which
            to begin replicating.
        :param partial_ok: True if bridgehead DCs containing partial
            replicas of the NC are acceptable.
        :param detect_failed: True to detect failed DCs and route
            replication traffic around them, FALSE to assume no DC
            has failed.
        """
        rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
                                            partial_ok, False)
        rbh_table = {x.dsa_dnstr: x for x in rbhs_all}

        DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all),
                                        [x.dsa_dnstr for x in rbhs_all]))

        # MS-TECH says to compute rbhs_avail but then doesn't use it
        # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
        #                                       partial_ok, detect_failed)

        lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
                                            partial_ok, False)
        if lbh.is_ro():
            lbhs_all.append(lbh)

        DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all),
                                        [x.dsa_dnstr for x in lbhs_all]))

        # MS-TECH says to compute lbhs_avail but then doesn't use it
        # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
        #                                       partial_ok, detect_failed)

        # FOR each nTDSConnection object cn such that the parent of cn is
        # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
        for ldsa in lbhs_all:
            for cn in ldsa.connect_table.values():

                rdsa = rbh_table.get(cn.from_dnstr)
                if rdsa is None:
                    continue

                DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr)
                # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
                # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
                # cn!transportType references t
                if ((cn.is_generated() and
                     not cn.is_rodc_topology() and
                     cn.transport_guid == transport.guid)):

                    # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
                    # cn!options and cn!schedule != sch
                    #     Perform an originating update to set cn!schedule to
                    #     sched
                    if ((not cn.is_user_owned_schedule() and
                         not cn.is_equivalent_schedule(link_sched))):
                        cn.schedule = link_sched
                        cn.set_modified(True)

                    # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                    # NTDSCONN_OPT_USE_NOTIFY are set in cn
                    if cn.is_override_notify_default() and \
                       cn.is_use_notify():

                        # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
                        # ri.Options
                        #    Perform an originating update to clear bits
                        #    NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                        #    NTDSCONN_OPT_USE_NOTIFY in cn!options
                        if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
                            cn.options &= \
                                ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                                  dsdb.NTDSCONN_OPT_USE_NOTIFY)
                            cn.set_modified(True)

                    # ELSE
                    else:

                        # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
                        # ri.Options
                        #     Perform an originating update to set bits
                        #     NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                        #     NTDSCONN_OPT_USE_NOTIFY in cn!options
                        if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
                            cn.options |= \
                                (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                                 dsdb.NTDSCONN_OPT_USE_NOTIFY)
                            cn.set_modified(True)

                    # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
                    if cn.is_twoway_sync():

                        # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
                        # ri.Options
                        #     Perform an originating update to clear bit
                        #     NTDSCONN_OPT_TWOWAY_SYNC in cn!options
                        if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
                            cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
                            cn.set_modified(True)

                    # ELSE
                    else:

                        # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
                        # ri.Options
                        #     Perform an originating update to set bit
                        #     NTDSCONN_OPT_TWOWAY_SYNC in cn!options
                        if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
                            cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
                            cn.set_modified(True)

                    # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
                    # in cn!options
                    if cn.is_intersite_compression_disabled():

                        # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
                        # in ri.Options
                        #     Perform an originating update to clear bit
                        #     NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
                        #     cn!options
                        if ((link_opt &
                             dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0):
                            cn.options &= \
                                ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
                            cn.set_modified(True)

                    # ELSE
                    else:
                        # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
                        # ri.Options
                        #     Perform an originating update to set bit
                        #     NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
                        #     cn!options
                        if ((link_opt &
                             dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
                            cn.options |= \
                                dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
                            cn.set_modified(True)

                    # Display any modified connection
                    if opts.readonly:
                        if cn.to_be_modified:
                            logger.info("TO BE MODIFIED:\n%s" % cn)

                        ldsa.commit_connections(self.samdb, ro=True)
                    else:
                        ldsa.commit_connections(self.samdb)
        # ENDFOR

        valid_connections = 0

        # FOR each nTDSConnection object cn such that cn!parent is
        # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
        for ldsa in lbhs_all:
            for cn in ldsa.connect_table.values():

                rdsa = rbh_table.get(cn.from_dnstr)
                if rdsa is None:
                    continue

                DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr)

                # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
                # cn!transportType references t) and
                # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
                if (((not cn.is_generated() or
                      cn.transport_guid == transport.guid) and
                     not cn.is_rodc_topology())):

                    # LET rguid be the objectGUID of the nTDSDSA object
                    # referenced by cn!fromServer
                    # LET lguid be (cn!parent)!objectGUID

                    # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
                    # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
                    #     Increment cValidConnections by 1
                    if ((not self.is_bridgehead_failed(rdsa, detect_failed) and
                         not self.is_bridgehead_failed(ldsa, detect_failed))):
                        valid_connections += 1

                    # IF keepConnections does not contain cn!objectGUID
                    #     APPEND cn!objectGUID to keepConnections
                    self.kept_connections.add(cn)
        # ENDFOR

        DEBUG_RED("valid connections %d" % valid_connections)
        DEBUG("kept_connections:\n%s" % (self.kept_connections,))
        # IF cValidConnections = 0
        if valid_connections == 0:

            # LET opt be NTDSCONN_OPT_IS_GENERATED
            opt = dsdb.NTDSCONN_OPT_IS_GENERATED

            # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
            #     SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
            #     NTDSCONN_OPT_USE_NOTIFY in opt
            if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
                opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                        dsdb.NTDSCONN_OPT_USE_NOTIFY)

            # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
            #     SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
            if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
                opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC

            # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
            # ri.Options
            #     SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
            if ((link_opt &
                 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
                opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION

            # Perform an originating update to create a new nTDSConnection
            # object cn that is a child of lbh, cn!enabledConnection = TRUE,
            # cn!options = opt, cn!transportType is a reference to t,
            # cn!fromServer is a reference to rbh, and cn!schedule = sch
            DEBUG_FN("new connection, KCC dsa: %s" % self.my_dsa.dsa_dnstr)
            cn = lbh.new_connection(opt, 0, transport,
                                    rbh.dsa_dnstr, link_sched)

            # Display any added connection
            if opts.readonly:
                if cn.to_be_added:
                    logger.info("TO BE ADDED:\n%s" % cn)

                lbh.commit_connections(self.samdb, ro=True)
            else:
                lbh.commit_connections(self.samdb)

            # APPEND cn!objectGUID to keepConnections
            self.kept_connections.add(cn)

    def add_transports(self, vertex, local_vertex, graph, detect_failed):
        """Build a Vertex's transport lists

        Each vertex has accept_red_red and accept_black lists that
        list what transports they accept under various conditions. The
        only transport that is ever accepted is IP, and a dummy extra
        transport called "EDGE_TYPE_ALL".

        Part of MS-ADTS 6.2.2.3.4.3 -- ColorVertices

        :param vertex: the remote vertex we are thinking about
        :param local_vertex: the vertex relating to the local site.
        :param graph: the intersite graph
        :param detect_failed: whether to detect failed links
        :return: True if some bridgeheads were not found
        """
        # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
        # here, but using vertex seems to make more sense. That is,
        # the docs want this:
        #
        #bh = self.get_bridgehead(vertex.site, vertex.part, transport,
        #                         local_vertex.is_black(), detect_failed)
        #
        # TODO WHY?????

        vertex.accept_red_red = []
        vertex.accept_black = []
        found_failed = False
        for t_guid, transport in self.transport_table.items():
            if transport.name != 'IP':
                #XXX well this is cheating a bit
                logging.warning("WARNING: we are ignoring a transport named %r"
                                % transport.name)
                continue

            # FLAG_CR_NTDS_DOMAIN 0x00000002
            if ((vertex.is_red() and transport.name != "IP" and
                 vertex.part.system_flags & 0x00000002)):
                continue

            if vertex not in graph.connected_vertices:
                continue

            partial_replica_okay = vertex.is_black()
            bh = self.get_bridgehead(vertex.site, vertex.part, transport,
                                     partial_replica_okay, detect_failed)
            if bh is None:
                found_failed = True
                continue

            vertex.accept_red_red.append(t_guid)
            vertex.accept_black.append(t_guid)

        # Add additional transport to allow another run of Dijkstra
        vertex.accept_red_red.append("EDGE_TYPE_ALL")
        vertex.accept_black.append("EDGE_TYPE_ALL")

        return found_failed
1691 def create_connections(self, graph, part, detect_failed):
1692 """Construct an NC replica graph for the NC identified by
1693 the given crossRef, then create any additional nTDSConnection
1694 objects required.
1696 :param graph: site graph.
1697 :param part: crossRef object for NC.
1698 :param detect_failed: True to detect failed DCs and route
1699 replication traffic around them, False to assume no DC
1700 has failed.
1702 Modifies self.kept_connections by adding any connections
1703 deemed to be "in use".
1705 :returns: (all_connected, found_failed_dc)
1706 (all_connected) True if the resulting NC replica graph
1707 connects all sites that need to be connected.
1708 (found_failed_dc) True if one or more failed DCs were
1709 detected.
1710 """
1711 all_connected = True
1712 found_failed = False
1714 logger.debug("create_connections(): enter\n"
1715 "\tpartdn=%s\n\tdetect_failed=%s" %
1716 (part.nc_dnstr, detect_failed))
1718 # XXX - This is a highly abbreviated function from the MS-TECH
1719 # ref. It creates connections between bridgeheads to all
1720 # sites that have appropriate replicas. Thus we are not
1721 # creating a minimum cost spanning tree but instead
1722 # producing a fully connected tree. This should produce
1723 # a full (albeit not optimal cost) replication topology.
1725 my_vertex = Vertex(self.my_site, part)
1726 my_vertex.color_vertex()
1728 for v in graph.vertices:
1729 v.color_vertex()
1730 if self.add_transports(v, my_vertex, graph, False):
1731 found_failed = True
1733 # No NC replicas for this NC in the site of the local DC,
1734 # so no nTDSConnection objects need be created
1735 if my_vertex.is_white():
1736 return all_connected, found_failed
1738 edge_list, n_components = get_spanning_tree_edges(graph,
1739 self.my_site,
1740 label=part.partstr)
1742 logger.debug("%s Number of components: %d" %
1743 (part.nc_dnstr, n_components))
1744 if n_components > 1:
1745 all_connected = False
1747 # LET partialReplicaOkay be TRUE if and only if
1748 # localSiteVertex.Color = COLOR.BLACK
1749 partial_ok = my_vertex.is_black()
1751 # Utilize the IP transport only for now
1752 transport = self.ip_transport
1754 DEBUG("edge_list %s" % edge_list)
1755 for e in edge_list:
1756 # XXX more accurate comparison?
1757 if e.directed and e.vertices[0].site is self.my_site:
1758 continue
1760 if e.vertices[0].site is self.my_site:
1761 rsite = e.vertices[1].site
1762 else:
1763 rsite = e.vertices[0].site
1765 # We don't make connections to our own site as that
1766 # is the intra-site topology generator's job
1767 if rsite is self.my_site:
1768 DEBUG("rsite is my_site")
1769 continue
1771 # Determine bridgehead server in remote site
1772 rbh = self.get_bridgehead(rsite, part, transport,
1773 partial_ok, detect_failed)
1774 if rbh is None:
1775 continue
1777 # An RODC acts as a BH for itself
1778 # IF AmIRODC() then
1779 # LET lbh be the nTDSDSA object of the local DC
1780 # ELSE
1781 # LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
1782 # cr, t, partialReplicaOkay, detectFailedDCs)
1783 if self.my_dsa.is_ro():
1784 lsite = self.my_site
1785 lbh = self.my_dsa
1786 else:
1787 lsite = self.my_site
1788 lbh = self.get_bridgehead(lsite, part, transport,
1789 partial_ok, detect_failed)
1790 # TODO
1791 if lbh is None:
1792 DEBUG_RED("DISASTER! lbh is None")
1793 return False, True
1795 DEBUG_CYAN("SITES")
1796 print lsite, rsite
1797 DEBUG_BLUE("vertices")
1798 print e.vertices
1799 DEBUG_BLUE("bridgeheads")
1800 print lbh, rbh
1801 DEBUG_BLUE("-" * 70)
1803 sitelink = e.site_link
1804 if sitelink is None:
1805 link_opt = 0x0
1806 link_sched = None
1807 else:
1808 link_opt = sitelink.options
1809 link_sched = sitelink.schedule
1811 self.create_connection(part, rbh, rsite, transport,
1812 lbh, lsite, link_opt, link_sched,
1813 partial_ok, detect_failed)
1815 return all_connected, found_failed
1817 def create_intersite_connections(self):
1818 """Computes an NC replica graph for each NC replica that "should be
1819 present" on the local DC or "is present" on any DC in the same site
1820 as the local DC. For each edge directed to an NC replica on such a
1821 DC from an NC replica on a DC in another site, the KCC creates an
1822 nTDSConnection object to imply that edge if one does not already
1823 exist.
1825 Modifies self.kept_connections - A set of nTDSConnection
1826 objects for edges that are directed
1827 to the local DC's site in one or more NC replica graphs.
1829 :returns: True if spanning trees were created for all NC replica
1830 graphs, otherwise False.
1831 """
1832 all_connected = True
1833 self.kept_connections = set()
1835 # LET crossRefList be the set containing each object o of class
1836 # crossRef such that o is a child of the CN=Partitions child of the
1837 # config NC
1839 # FOR each crossRef object cr in crossRefList
1840 # IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
1841 # is clear in cr!systemFlags, skip cr.
1842 # LET g be the GRAPH return of SetupGraph()
1844 for part in self.part_table.values():
1846 if not part.is_enabled():
1847 continue
1849 if part.is_foreign():
1850 continue
1852 graph = self.setup_graph(part)
1854 # Create nTDSConnection objects, routing replication traffic
1855 # around "failed" DCs.
1856 found_failed = False
1858 connected, found_failed = self.create_connections(graph,
1859 part, True)
1861 DEBUG("with detect_failed: connected %s Found failed %s" %
1862 (connected, found_failed))
1863 if not connected:
1864 all_connected = False
1866 if found_failed:
1867 # One or more failed DCs preclude use of the ideal NC
1868 # replica graph. Add connections for the ideal graph.
1869 self.create_connections(graph, part, False)
1871 return all_connected
1873 def intersite(self):
1874 """The head method for generating the inter-site KCC replica
1875 connection graph and attendant nTDSConnection objects
1876 in the samdb.
1878 Produces self.kept_connections set of NTDS Connections
1879 that should be kept during subsequent pruning process.
1881 :return: True if the produced NC replica graph connects all
1882 sites that need to be connected, otherwise False
1883 """
1885 # Retrieve my DSA
1886 mydsa = self.my_dsa
1887 mysite = self.my_site
1888 all_connected = True
1890 logger.debug("intersite(): enter")
1892 # Determine who is the ISTG
1893 if opts.readonly:
1894 mysite.select_istg(self.samdb, mydsa, ro=True)
1895 else:
1896 mysite.select_istg(self.samdb, mydsa, ro=False)
1898 # Test whether local site has topology disabled
1899 if mysite.is_intersite_topology_disabled():
1900 logger.debug("intersite(): exit disabled all_connected=%d" %
1901 all_connected)
1902 return all_connected
1904 if not mydsa.is_istg():
1905 logger.debug("intersite(): exit not istg all_connected=%d" %
1906 all_connected)
1907 return all_connected
1909 self.merge_failed_links()
1911 # For each NC with an NC replica that "should be present" on the
1912 # local DC or "is present" on any DC in the same site as the
1913 # local DC, the KCC constructs a site graph--a precursor to an NC
1914 # replica graph. The site connectivity for a site graph is defined
1915 # by objects of class interSiteTransport, siteLink, and
1916 # siteLinkBridge in the config NC.
1918 all_connected = self.create_intersite_connections()
1920 logger.debug("intersite(): exit all_connected=%d" % all_connected)
1921 return all_connected
1923 def update_rodc_connection(self):
1924 """Runs when the local DC is an RODC and updates the RODC NTFRS
1925 connection object.
1926 """
1927 # Given an nTDSConnection object cn1, such that cn1.options contains
1928 # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2
1929 # whose options do not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1
1930 # to ensure that the following is true:
1932 # cn1.fromServer = cn2.fromServer
1933 # cn1.schedule = cn2.schedule
1935 # If no such cn2 can be found, cn1 is not modified.
1936 # If no such cn1 can be found, nothing is modified by this task.
1938 if not self.my_dsa.is_ro():
1939 return
1941 all_connections = self.my_dsa.connect_table.values()
1942 ro_connections = [x for x in all_connections if x.is_rodc_topology()]
1943 rw_connections = [x for x in all_connections
1944 if x not in ro_connections]
1946 # XXX here we are dealing with multiple RODC_TOPO connections,
1947 # if they exist. It is not clear whether the spec means that
1948 # or if it ever arises.
1949 if rw_connections and ro_connections:
1950 for con in ro_connections:
1951 cn2 = rw_connections[0]
1952 con.from_dnstr = cn2.from_dnstr
1953 con.schedule = cn2.schedule
1954 con.to_be_modified = True
1956 self.my_dsa.commit_connections(self.samdb, ro=opts.readonly)
1958 def intrasite_max_node_edges(self, node_count):
1959 """Returns the maximum number of edges directed to a node in
1960 the intrasite replica graph.
1962 The KCC does not create more
1963 than 50 edges directed to a single DC. To optimize replication,
1964 we compute that each node should have n+2 total edges directed
1965 to it such that (n) is the smallest non-negative integer
1966 satisfying (node_count <= 2*(n*n) + 6*n + 7)
1968 (If the number of edges is m (i.e. n + 2), that is the same as
1969 requiring node_count <= 2*m*m - 2*m + 3).
1971 edges n nodecount
1972 2 0 7
1973 3 1 15
1974 4 2 27
1975 5 3 43
1977 50 48 4903
1979 :param node_count: total number of nodes in the replica graph
1981 The intention is that there should be no more than 3 hops
1982 between any two DSAs at a site. With up to 7 nodes the 2 edges
1983 of the ring are enough; any configuration of extra edges with
1984 8 nodes will be enough. It is less clear that the 3 hop
1985 guarantee holds at e.g. 15 nodes in degenerate cases, but
1986 those are quite unlikely given the extra edges are randomly
1987 arranged.
1988 """
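# As a sanity check of the table above (illustrative values only,
# not executed anywhere):
#
#   self.intrasite_max_node_edges(7)    == 2
#   self.intrasite_max_node_edges(15)   == 3
#   self.intrasite_max_node_edges(43)   == 5
#   self.intrasite_max_node_edges(4903) == 50
#   self.intrasite_max_node_edges(4904) == 50   # capped at 50 edges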
1989 n = 0
1990 while True:
1991 if node_count <= (2 * (n * n) + (6 * n) + 7):
1992 break
1993 n = n + 1
1994 n = n + 2
1995 if n < 50:
1996 return n
1997 return 50
1999 def construct_intrasite_graph(self, site_local, dc_local,
2000 nc_x, gc_only, detect_stale):
2001 # [MS-ADTS] 6.2.2.2
2002 # We're using the MS notation names here to allow
2003 # correlation back to the published algorithm.
2005 # nc_x - naming context (x) that we are testing if it
2006 # "should be present" on the local DC
2007 # f_of_x - replica (f) found on a DC (s) for NC (x)
2008 # dc_s - DC where f_of_x replica was found
2009 # dc_local - local DC that potentially needs a replica
2010 # (f_of_x)
2011 # r_list - replica list R
2012 # p_of_x - replica (p) is partial and found on a DC (s)
2013 # for NC (x)
2014 # l_of_x - replica (l) is the local replica for NC (x)
2015 # that should appear on the local DC
2016 # r_len - length of the replica list |R|
2018 # If the DSA doesn't need a replica for this
2019 # partition (NC x) then continue
2020 needed, ro, partial = nc_x.should_be_present(dc_local)
2022 DEBUG_YELLOW("construct_intrasite_graph(): enter" +
2023 "\n\tgc_only=%d" % gc_only +
2024 "\n\tdetect_stale=%d" % detect_stale +
2025 "\n\tneeded=%s" % needed +
2026 "\n\tro=%s" % ro +
2027 "\n\tpartial=%s" % partial +
2028 "\n%s" % nc_x)
2030 if not needed:
2031 DEBUG_RED("%s lacks 'should be present' status, "
2032 "aborting construct_intrasite_graph!" %
2033 nc_x.nc_dnstr)
2034 return
2036 # Create a NCReplica that matches what the local replica
2037 # should say. We'll use this below in our r_list
2038 l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid,
2039 nc_x.nc_dnstr)
2041 l_of_x.identify_by_basedn(self.samdb)
2043 l_of_x.rep_partial = partial
2044 l_of_x.rep_ro = ro
2046 # Add this replica that "should be present" to the
2047 # needed replica table for this DSA
2048 dc_local.add_needed_replica(l_of_x)
2050 # Replica list
2052 # Let R be a sequence containing each writable replica f of x
2053 # such that f "is present" on a DC s satisfying the following
2054 # criteria:
2056 # * s is a writable DC other than the local DC.
2058 # * s is in the same site as the local DC.
2060 # * If x is a read-only full replica and x is a domain NC,
2061 # then the DC's functional level is at least
2062 # DS_BEHAVIOR_WIN2008.
2064 # * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
2065 # in the options attribute of the site settings object for
2066 # the local DC's site, or no tuple z exists in the
2067 # kCCFailedLinks or kCCFailedConnections variables such
2068 # that z.UUIDDsa is the objectGUID of the nTDSDSA object
2069 # for s, z.FailureCount > 0, and the current time -
2070 # z.TimeFirstFailure > 2 hours.
2072 r_list = []
2074 # We'll loop thru all the DSAs looking for
2075 # writeable NC replicas that match the naming
2076 # context dn for (nc_x)
2078 for dc_s in self.my_site.dsa_table.values():
2079 # If this partition (nc_x) doesn't appear as a
2080 # replica (f_of_x) on (dc_s) then continue
2081 if nc_x.nc_dnstr not in dc_s.current_rep_table:
2082 continue
2084 # Pull out the NCReplica (f) of (x) with the dn
2085 # that matches NC (x) we are examining.
2086 f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2088 # Replica (f) of NC (x) must be writable
2089 if f_of_x.is_ro():
2090 continue
2092 # Replica (f) of NC (x) must satisfy the
2093 # "is present" criteria for DC (s) that
2094 # it was found on
2095 if not f_of_x.is_present():
2096 continue
2098 # DC (s) must be a writable DSA other than
2099 # my local DC. In other words we'd only replicate
2100 # from another writable DC
2101 if dc_s.is_ro() or dc_s is dc_local:
2102 continue
2104 # Certain replica graphs are produced only
2105 # for global catalogs, so test against
2106 # method input parameter
2107 if gc_only and not dc_s.is_gc():
2108 continue
2110 # DC (s) must be in the same site as the local DC
2111 # as this is the intra-site algorithm. This is
2112 # handled by virtue of placing DSAs in per
2113 # site objects (see enclosing for() loop)
2115 # If NC (x) is intended to be read-only full replica
2116 # for a domain NC on the target DC then the source
2117 # DC should have functional level at minimum WIN2008
2119 # Effectively we're saying that in order to replicate
2120 # to a targeted RODC (which was introduced in Windows 2008)
2121 # then we have to replicate from a DC that is also minimally
2122 # at that level.
2124 # You can also see this requirement in the MS special
2125 # considerations for RODC which state that to deploy
2126 # an RODC, at least one writable domain controller in
2127 # the domain must be running Windows Server 2008
2128 if ro and not partial and nc_x.nc_type == NCType.domain:
2129 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
2130 continue
2132 # If we haven't been told to turn off stale connection
2133 # detection and this dsa has a stale connection then
2134 # continue
2135 if detect_stale and self.is_stale_link_connection(dc_s):
2136 continue
2138 # Replica meets criteria. Add it to table indexed
2139 # by the GUID of the DC that it appears on
2140 r_list.append(f_of_x)
2142 # If a partial (not full) replica of NC (x) "should be present"
2143 # on the local DC, append to R each partial replica (p of x)
2144 # such that p "is present" on a DC satisfying the same
2145 # criteria defined above for full replica DCs.
2147 # XXX This loop and the previous one differ only in whether
2148 # the replica is partial or not. Here we only accept partial
2149 # (because we're partial); before we only accepted full. Order
2150 # doesn't matter (the list is sorted a few lines down) so these
2151 # loops could easily be merged. Or this could be a helper
2152 # function.
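# A possible shape for that helper (hypothetical sketch only -- the
# name and signature are not part of this file, and it omits the
# WIN2008 functional-level check that applies only to the full
# replica pass above):
#
#   def _replicas_for(self, nc_x, dc_local, gc_only, detect_stale, want):
#       for dc_s in self.my_site.dsa_table.values():
#           rep = dc_s.current_rep_table.get(nc_x.nc_dnstr)
#           if rep is None or not want(rep) or not rep.is_present():
#               continue
#           if dc_s.is_ro() or dc_s is dc_local:
#               continue
#           if gc_only and not dc_s.is_gc():
#               continue
#           if detect_stale and self.is_stale_link_connection(dc_s):
#               continue
#           yield rep
#
# with want=lambda r: not r.is_ro() for the full pass and
# want=lambda r: r.is_partial() for the partial pass.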
2154 if partial:
2155 # Now we loop thru all the DSAs looking for
2156 # partial NC replicas that match the naming
2157 # context dn for (NC x)
2158 for dc_s in self.my_site.dsa_table.values():
2160 # If this partition NC (x) doesn't appear as a
2161 # replica (p) of NC (x) on the dsa DC (s) then
2162 # continue
2163 if nc_x.nc_dnstr not in dc_s.current_rep_table:
2164 continue
2166 # Pull out the NCReplica with the dn that
2167 # matches NC (x) we are examining.
2168 p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2170 # Replica (p) of NC (x) must be partial
2171 if not p_of_x.is_partial():
2172 continue
2174 # Replica (p) of NC (x) must satisfy the
2175 # "is present" criteria for DC (s) that
2176 # it was found on
2177 if not p_of_x.is_present():
2178 continue
2180 # DC (s) must be a writable DSA other than
2181 # my DSA. In other words we'd only replicate
2182 # from another writable DSA
2183 if dc_s.is_ro() or dc_s is dc_local:
2184 continue
2186 # Certain replica graphs are produced only
2187 # for global catalogs, so test against
2188 # method input parameter
2189 if gc_only and not dc_s.is_gc():
2190 continue
2192 # If we haven't been told to turn off stale connection
2193 # detection and this dsa has a stale connection then
2194 # continue
2195 if detect_stale and self.is_stale_link_connection(dc_s):
2196 continue
2198 # Replica meets criteria. Add it to table indexed
2199 # by the GUID of the DSA that it appears on
2200 r_list.append(p_of_x)
2202 # Append to R the NC replica that "should be present"
2203 # on the local DC
2204 r_list.append(l_of_x)
2206 r_list.sort(sort_replica_by_dsa_guid)
2207 r_len = len(r_list)
2209 max_node_edges = self.intrasite_max_node_edges(r_len)
2211 # Add a node for each r_list element to the replica graph
2212 graph_list = []
2213 for rep in r_list:
2214 node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
2215 graph_list.append(node)
2217 # For each r(i) from (0 <= i < |R|-1)
2218 i = 0
2219 while i < (r_len-1):
2220 # Add an edge from r(i) to r(i+1) if r(i) is a full
2221 # replica or r(i+1) is a partial replica
2222 if not r_list[i].is_partial() or r_list[i+1].is_partial():
2223 graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr)
2225 # Add an edge from r(i+1) to r(i) if r(i+1) is a full
2226 # replica or ri is a partial replica.
2227 if not r_list[i+1].is_partial() or r_list[i].is_partial():
2228 graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr)
2229 i = i + 1
2231 # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
2232 # or r0 is a partial replica.
2233 if not r_list[r_len-1].is_partial() or r_list[0].is_partial():
2234 graph_list[0].add_edge_from(r_list[r_len-1].rep_dsa_dnstr)
2236 # Add an edge from r0 to r|R|-1 if r0 is a full replica or
2237 # r|R|-1 is a partial replica.
2238 if not r_list[0].is_partial() or r_list[r_len-1].is_partial():
2239 graph_list[r_len-1].add_edge_from(r_list[0].rep_dsa_dnstr)
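# For example, with |R| = 3 and all full replicas, the edges added
# above give every node two incoming edges -- the double ring
#
#   r0 -> r1 -> r2 -> r0   and   r0 -> r2 -> r1 -> r0
#
# which is what the 'directed_double_ring_or_small' property checked
# below expects (example is illustrative, not taken from the code).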
2241 DEBUG("r_list is length %s" % len(r_list))
2242 DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr))
2243 for x in r_list))
2245 do_dot_files = opts.dot_files and opts.debug
2246 if opts.verify or do_dot_files:
2247 dot_edges = []
2248 dot_vertices = set()
2249 for v1 in graph_list:
2250 dot_vertices.add(v1.dsa_dnstr)
2251 for v2 in v1.edge_from:
2252 dot_edges.append((v2, v1.dsa_dnstr))
2253 dot_vertices.add(v2)
2255 verify_properties = ('connected', 'directed_double_ring_or_small')
2256 verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices,
2257 label='%s__%s__%s' % (site_local.site_dnstr,
2258 nctype_lut[nc_x.nc_type],
2259 nc_x.nc_dnstr),
2260 properties=verify_properties, debug=DEBUG,
2261 verify=opts.verify,
2262 dot_files=do_dot_files, directed=True)
2264 # For each existing nTDSConnection object implying an edge
2265 # from rj of R to ri such that j != i, an edge from rj to ri
2266 # is not already in the graph, and the total edges directed
2267 # to ri is less than n+2, the KCC adds that edge to the graph.
2268 for vertex in graph_list:
2269 dsa = self.my_site.dsa_table[vertex.dsa_dnstr]
2270 for connect in dsa.connect_table.values():
2271 remote = connect.from_dnstr
2272 if remote in self.my_site.dsa_table:
2273 vertex.add_edge_from(remote)
2275 DEBUG('reps are: %s' % ' '.join(x.rep_dsa_dnstr for x in r_list))
2276 DEBUG('dsas are: %s' % ' '.join(x.dsa_dnstr for x in graph_list))
2278 for tnode in graph_list:
2279 # To optimize replication latency in sites with many NC
2280 # replicas, the KCC adds new edges directed to ri to bring
2281 # the total edges to n+2, where the NC replica rk of R
2282 # from which the edge is directed is chosen at random such
2283 # that k != i and an edge from rk to ri is not already in
2284 # the graph.
2286 # Note that the KCC tech ref does not give a number for
2287 # the definition of "sites with many NC replicas". At a
2288 # bare minimum to satisfy n+2 edges directed at a node we
2289 # have to have at least three replicas in |R| (i.e. if n
2290 # is zero then at least replicas from two other graph
2291 # nodes may direct edges to us).
2292 if r_len >= 3 and not tnode.has_sufficient_edges():
2293 candidates = [x for x in graph_list if
2294 (x is not tnode and
2295 x.dsa_dnstr not in tnode.edge_from)]
2297 DEBUG_BLUE("looking for random link for %s. r_len %d, "
2298 "graph len %d candidates %d"
2299 % (tnode.dsa_dnstr, r_len, len(graph_list),
2300 len(candidates)))
2302 DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates])
2304 while candidates and not tnode.has_sufficient_edges():
2305 other = random.choice(candidates)
2306 DEBUG("trying to add candidate %s" % other.dsa_dnstr)
2307 if not tnode.add_edge_from(other):
2308 DEBUG_RED("could not add %s" % other.dsa_dnstr)
2309 candidates.remove(other)
2310 else:
2311 DEBUG_FN("not adding links to %s: nodes %s, links is %s/%s" %
2312 (tnode.dsa_dnstr, r_len, len(tnode.edge_from),
2313 tnode.max_edges))
2315 # Print the graph node in debug mode
2316 logger.debug("%s" % tnode)
2318 # For each edge directed to the local DC, ensure a nTDSConnection
2319 # points to us that satisfies the KCC criteria
2321 if tnode.dsa_dnstr == dc_local.dsa_dnstr:
2322 tnode.add_connections_from_edges(dc_local)
2324 if opts.verify or do_dot_files:
2325 dot_edges = []
2326 dot_vertices = set()
2327 for v1 in graph_list:
2328 dot_vertices.add(v1.dsa_dnstr)
2329 for v2 in v1.edge_from:
2330 dot_edges.append((v2, v1.dsa_dnstr))
2331 dot_vertices.add(v2)
2333 verify_properties = ('connected', 'directed_double_ring_or_small')
2334 verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices,
2335 label='%s__%s__%s' % (site_local.site_dnstr,
2336 nctype_lut[nc_x.nc_type],
2337 nc_x.nc_dnstr),
2338 properties=verify_properties, debug=DEBUG,
2339 verify=opts.verify,
2340 dot_files=do_dot_files, directed=True)
2342 def intrasite(self):
2343 """The head method for generating the intra-site KCC replica
2344 connection graph and attendant nTDSConnection objects
2345 in the samdb.
2346 """
2347 # Retrieve my DSA
2348 mydsa = self.my_dsa
2350 logger.debug("intrasite(): enter")
2352 # Test whether local site has topology disabled
2353 mysite = self.my_site
2354 if mysite.is_intrasite_topology_disabled():
2355 return
2357 detect_stale = (not mysite.is_detect_stale_disabled())
2358 for connect in mydsa.connect_table.values():
2359 if connect.to_be_added:
2360 DEBUG_CYAN("TO BE ADDED:\n%s" % connect)
2362 # Loop thru all the partitions, with gc_only False
2363 for partdn, part in self.part_table.items():
2364 self.construct_intrasite_graph(mysite, mydsa, part, False,
2365 detect_stale)
2366 for connect in mydsa.connect_table.values():
2367 if connect.to_be_added:
2368 DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
2370 # If the DC is a GC server, the KCC constructs an additional NC
2371 # replica graph (and creates nTDSConnection objects) for the
2372 # config NC as above, except that only NC replicas that "are present"
2373 # on GC servers are added to R.
2374 for connect in mydsa.connect_table.values():
2375 if connect.to_be_added:
2376 DEBUG_YELLOW("TO BE ADDED:\n%s" % connect)
2378 # Do it again, with gc_only True
2379 for partdn, part in self.part_table.items():
2380 if part.is_config():
2381 self.construct_intrasite_graph(mysite, mydsa, part, True,
2382 detect_stale)
2384 # The DC repeats the NC replica graph computation and nTDSConnection
2385 # creation for each of the NC replica graphs, this time assuming
2386 # that no DC has failed. It does so by re-executing the steps as
2387 # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
2388 # set in the options attribute of the site settings object for
2389 # the local DC's site. (i.e. we set the "detect_stale" flag to False)
2390 for connect in mydsa.connect_table.values():
2391 if connect.to_be_added:
2392 DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
2394 # Loop thru all the partitions.
2395 for partdn, part in self.part_table.items():
2396 self.construct_intrasite_graph(mysite, mydsa, part, False,
2397 False) # don't detect stale
2399 # If the DC is a GC server, the KCC constructs an additional NC
2400 # replica graph (and creates nTDSConnection objects) for the
2401 # config NC as above, except that only NC replicas that "are present"
2402 # on GC servers are added to R.
2403 for connect in mydsa.connect_table.values():
2404 if connect.to_be_added:
2405 DEBUG_RED("TO BE ADDED:\n%s" % connect)
2407 for partdn, part in self.part_table.items():
2408 if part.is_config():
2409 self.construct_intrasite_graph(mysite, mydsa, part, True,
2410 False) # don't detect stale
2412 if opts.readonly:
2413 # Display any connections that are to be deleted, modified or added
2414 for connect in mydsa.connect_table.values():
2415 if connect.to_be_deleted:
2416 logger.info("TO BE DELETED:\n%s" % connect)
2417 if connect.to_be_modified:
2418 logger.info("TO BE MODIFIED:\n%s" % connect)
2419 if connect.to_be_added:
2420 DEBUG_GREEN("TO BE ADDED:\n%s" % connect)
2422 mydsa.commit_connections(self.samdb, ro=True)
2423 else:
2424 # Commit any newly created connections to the samdb
2425 mydsa.commit_connections(self.samdb)
2427 def list_dsas(self):
2428 self.load_my_site()
2429 self.load_my_dsa()
2431 self.load_all_sites()
2432 self.load_all_partitions()
2433 self.load_all_transports()
2434 self.load_all_sitelinks()
2435 dsas = []
2436 for site in self.site_table.values():
2437 dsas.extend([dsa.dsa_dnstr.replace('CN=NTDS Settings,', '', 1)
2438 for dsa in site.dsa_table.values()])
2439 return dsas
2441 def load_samdb(self, dburl, lp, creds):
2442 self.samdb = SamDB(url=dburl,
2443 session_info=system_session(),
2444 credentials=creds, lp=lp)
2446 def plot_all_connections(self, basename, verify_properties=()):
2447 verify = verify_properties and opts.verify
2448 plot = opts.dot_files
2449 if not (verify or plot):
2450 return
2452 dot_edges = []
2453 dot_vertices = []
2454 edge_colours = []
2455 vertex_colours = []
2457 for dsa in self.dsa_by_dnstr.values():
2458 dot_vertices.append(dsa.dsa_dnstr)
2459 if dsa.is_ro():
2460 vertex_colours.append('#cc0000')
2461 else:
2462 vertex_colours.append('#0000cc')
2463 for con in dsa.connect_table.values():
2464 if con.is_rodc_topology():
2465 edge_colours.append('red')
2466 else:
2467 edge_colours.append('blue')
2468 dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))
2470 verify_and_dot(basename, dot_edges, vertices=dot_vertices,
2471 label=self.my_dsa_dnstr, properties=verify_properties,
2472 debug=DEBUG, verify=verify, dot_files=plot,
2473 directed=True, edge_colors=edge_colours,
2474 vertex_colors=vertex_colours)
2476 def run(self, dburl, lp, creds, forced_local_dsa=None,
2477 forget_local_links=False, forget_intersite_links=False):
2478 """Method to perform a complete run of the KCC and
2479 produce an updated topology for subsequent NC replica
2480 synchronization between domain controllers.
2481 """
2482 # We may already have a samdb setup if we are
2483 # currently importing an ldif for a test run
2484 if self.samdb is None:
2485 try:
2486 self.load_samdb(dburl, lp, creds)
2487 except ldb.LdbError, (num, msg):
2488 logger.error("Unable to open sam database %s : %s" %
2489 (dburl, msg))
2490 return 1
2492 if forced_local_dsa:
2493 self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" %
2494 forced_local_dsa)
2496 try:
2497 # Setup
2498 self.load_my_site()
2499 self.load_my_dsa()
2501 self.load_all_sites()
2502 self.load_all_partitions()
2503 self.load_all_transports()
2504 self.load_all_sitelinks()
2506 if opts.verify or opts.dot_files:
2507 guid_to_dnstr = {}
2508 for site in self.site_table.values():
2509 guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
2510 for dnstr, dsa
2511 in site.dsa_table.items())
2513 self.plot_all_connections('dsa_initial')
2515 dot_edges = []
2516 current_reps, needed_reps = self.my_dsa.get_rep_tables()
2517 for dnstr, c_rep in current_reps.items():
2518 DEBUG("c_rep %s" % c_rep)
2519 dot_edges.append((self.my_dsa.dsa_dnstr, dnstr))
2521 verify_and_dot('dsa_repsFrom_initial', dot_edges,
2522 directed=True, label=self.my_dsa_dnstr,
2523 properties=(), debug=DEBUG, verify=opts.verify,
2524 dot_files=opts.dot_files)
2526 dot_edges = []
2527 for site in self.site_table.values():
2528 for dsa in site.dsa_table.values():
2529 current_reps, needed_reps = dsa.get_rep_tables()
2530 for dn_str, rep in current_reps.items():
2531 for reps_from in rep.rep_repsFrom:
2532 DEBUG("rep %s" % rep)
2533 dsa_guid = str(reps_from.source_dsa_obj_guid)
2534 dsa_dn = guid_to_dnstr[dsa_guid]
2535 dot_edges.append((dsa.dsa_dnstr, dsa_dn))
2537 verify_and_dot('dsa_repsFrom_initial_all', dot_edges,
2538 directed=True, label=self.my_dsa_dnstr,
2539 properties=(), debug=DEBUG, verify=opts.verify,
2540 dot_files=opts.dot_files)
2542 dot_edges = []
2543 for link in self.sitelink_table.values():
2544 for a, b in itertools.combinations(link.site_list, 2):
2545 dot_edges.append((str(a), str(b)))
2546 properties = ('connected',)
2547 verify_and_dot('dsa_sitelink_initial', dot_edges,
2548 directed=False,
2549 label=self.my_dsa_dnstr, properties=properties,
2550 debug=DEBUG, verify=opts.verify,
2551 dot_files=opts.dot_files)
2553 if forget_local_links:
2554 for dsa in self.my_site.dsa_table.values():
2555 dsa.connect_table = {k: v for k, v in
2556 dsa.connect_table.items()
2557 if v.is_rodc_topology()}
2558 self.plot_all_connections('dsa_forgotten_local')
2560 if forget_intersite_links:
2561 for site in self.site_table.values():
2562 for dsa in site.dsa_table.values():
2563 dsa.connect_table = {k: v for k, v in
2564 dsa.connect_table.items()
2565 if site is self.my_site and
2566 v.is_rodc_topology()}
2568 self.plot_all_connections('dsa_forgotten_all')
2569 # These are the published steps (in order) for the
2570 # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)
2572 # Step 1
2573 self.refresh_failed_links_connections()
2575 # Step 2
2576 self.intrasite()
2578 # Step 3
2579 all_connected = self.intersite()
2581 # Step 4
2582 self.remove_unneeded_ntdsconn(all_connected)
2584 # Step 5
2585 self.translate_ntdsconn()
2587 # Step 6
2588 self.remove_unneeded_failed_links_connections()
2590 # Step 7
2591 self.update_rodc_connection()
2593 if opts.verify or opts.dot_files:
2594 self.plot_all_connections('dsa_final',
2595 ('connected', 'forest_of_rings'))
2597 DEBUG_MAGENTA("there are %d dsa guids" % len(guid_to_dnstr))
2599 dot_edges = []
2600 edge_colors = []
2601 my_dnstr = self.my_dsa.dsa_dnstr
2602 current_reps, needed_reps = self.my_dsa.get_rep_tables()
2603 for dnstr, n_rep in needed_reps.items():
2604 for reps_from in n_rep.rep_repsFrom:
2605 guid_str = str(reps_from.source_dsa_obj_guid)
2606 dot_edges.append((my_dnstr, guid_to_dnstr[guid_str]))
2607 edge_colors.append('#' + str(n_rep.nc_guid)[:6])
2609 verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True,
2610 label=self.my_dsa_dnstr,
2611 properties=(), debug=DEBUG, verify=opts.verify,
2612 dot_files=opts.dot_files,
2613 edge_colors=edge_colors)
2615 dot_edges = []
2617 for site in self.site_table.values():
2618 for dsa in site.dsa_table.values():
2619 current_reps, needed_reps = dsa.get_rep_tables()
2620 for n_rep in needed_reps.values():
2621 for reps_from in n_rep.rep_repsFrom:
2622 dsa_guid = str(reps_from.source_dsa_obj_guid)
2623 dsa_dn = guid_to_dnstr[dsa_guid]
2624 dot_edges.append((dsa.dsa_dnstr, dsa_dn))
2626 verify_and_dot('dsa_repsFrom_final_all', dot_edges,
2627 directed=True, label=self.my_dsa_dnstr,
2628 properties=(), debug=DEBUG, verify=opts.verify,
2629 dot_files=opts.dot_files)
2631 except:
2632 raise
2634 return 0
2636 def import_ldif(self, dburl, lp, creds, ldif_file):
2637 """Import all objects and attributes that are relevant
2638 to the KCC algorithms from a previously exported LDIF file.
2640 The point of this function is to allow a programmer/debugger to
2641 import an LDIF file with non-security-relevant information that
2642 was previously extracted from a DC database. The LDIF file is used
2643 to create a temporary abbreviated database. The KCC algorithm can
2644 then run against this abbreviated database for debug or test
2645 verification that the topology generated is computationally the
2646 same between different OSes and algorithms.
2648 :param dburl: path to the temporary abbreviated db to create
2649 :param ldif_file: path to the ldif file to import
2650 """
2651 try:
2652 self.samdb = ldif_utils.ldif_to_samdb(dburl, lp, ldif_file,
2653 opts.forced_local_dsa)
2654 except ldif_utils.LdifError, e:
2655 print e
2656 return 1
2657 return 0
2659 def export_ldif(self, dburl, lp, creds, ldif_file):
2660 """Routine to extract all objects and attributes that are relevant
2661 to the KCC algorithms from a DC database.
2663 The point of this function is to allow a programmer/debugger to
2664 extract an LDIF file with non-security-relevant information from
2665 a DC database. The LDIF file can then be used to "import" via
2666 the import_ldif() function this file into a temporary abbreviated
2667 database. The KCC algorithm can then run against this abbreviated
2668 database for debug or test verification that the topology generated
2669 is computationally the same between different OSes and algorithms.
2671 :param dburl: LDAP database URL to extract info from
2672 :param ldif_file: output LDIF file name to create
2673 """
2674 try:
2675 ldif_utils.samdb_to_ldif_file(self.samdb, dburl, lp, creds,
2676 ldif_file)
2677 except ldif_utils.LdifError, e:
2678 print e
2679 return 1
2680 return 0
2682 ##################################################
2683 # Global Functions
2684 ##################################################
2687 def get_spanning_tree_edges(graph, my_site, label=None):
2688 # Phase 1: Run Dijkstra's to get a list of internal edges, which are
2689 # just the shortest-paths connecting colored vertices
2691 internal_edges = set()
2693 for e_set in graph.edge_set:
2694 edgeType = None
2695 for v in graph.vertices:
2696 v.edges = []
2698 # Every edge in an edge set has the same con_type
2699 for e in e_set.edges:
2700 edgeType = e.con_type
2701 for v in e.vertices:
2702 v.edges.append(e)
2704 if opts.verify or opts.dot_files:
2705 graph_edges = [(a.site.site_dnstr, b.site.site_dnstr)
2706 for a, b in
2707 itertools.chain(
2708 *(itertools.combinations(edge.vertices, 2)
2709 for edge in e_set.edges))]
2710 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2712 if opts.dot_files and opts.debug:
2713 write_dot_file('edgeset_%s' % (edgeType,), graph_edges,
2714 vertices=graph_nodes, label=label)
2716 if opts.verify:
2717 verify_graph('spanning tree edge set %s' % edgeType,
2718 graph_edges, vertices=graph_nodes,
2719 properties=('complete', 'connected'),
2720 debug=DEBUG)
2722 # Run dijkstra's algorithm with just the red vertices as seeds
2723 # Seed from the full replicas
2724 dijkstra(graph, edgeType, False)
2726 # Process edge set
2727 process_edge_set(graph, e_set, internal_edges)
2729 # Run dijkstra's algorithm with red and black vertices as the seeds
2730 # Seed from both full and partial replicas
2731 dijkstra(graph, edgeType, True)
2733 # Process edge set
2734 process_edge_set(graph, e_set, internal_edges)
2736 # All vertices have root/component as itself
2737 setup_vertices(graph)
2738 process_edge_set(graph, None, internal_edges)
2740 if opts.verify or opts.dot_files:
2741 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
2742 for e in internal_edges]
2743 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2744 verify_properties = ('multi_edge_forest',)
2745 verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label,
2746 properties=verify_properties, debug=DEBUG,
2747 verify=opts.verify,
2748 dot_files=opts.dot_files)
2750 # Phase 2: Run Kruskal's on the internal edges
2751 output_edges, components = kruskal(graph, internal_edges)
2753 # This recalculates the cost for the path connecting the
2754 # closest red vertex. Ignoring types is fine because NO
2755 # suboptimal edge should exist in the graph
2756 dijkstra(graph, "EDGE_TYPE_ALL", False) # TODO rename
2757 # Phase 3: Process the output
2758 for v in graph.vertices:
2759 if v.is_red():
2760 v.dist_to_red = 0
2761 else:
2762 v.dist_to_red = v.repl_info.cost
2764 if opts.verify or opts.dot_files:
2765 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
2766 for e in internal_edges]
2767 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2768 verify_properties = ('multi_edge_forest',)
2769 verify_and_dot('postkruskal', graph_edges, graph_nodes,
2770 label=label, properties=verify_properties,
2771 debug=DEBUG, verify=opts.verify,
2772 dot_files=opts.dot_files)
2774 # Ensure only one-way connections for partial-replicas,
2775 # and make sure they point the right way.
2776 edge_list = []
2777 for edge in output_edges:
2778 # We know these edges only have two endpoints because we made
2779 # them.
2780 v, w = edge.vertices
2781 if v.site is my_site or w.site is my_site:
2782 if (((v.is_black() or w.is_black()) and
2783 v.dist_to_red != MAX_DWORD)):
2784 edge.directed = True
2786 if w.dist_to_red < v.dist_to_red:
2787 edge.vertices[:] = w, v
2788 edge_list.append(edge)
2790 if opts.verify or opts.dot_files:
2791 graph_edges = [[x.site.site_dnstr for x in e.vertices]
2792 for e in edge_list]
2793 # add the reverse edge if not directed.
2794 graph_edges.extend([x.site.site_dnstr
2795 for x in reversed(e.vertices)]
2796 for e in edge_list if not e.directed)
2797 graph_nodes = [x.site.site_dnstr for x in graph.vertices]
2798 verify_properties = ()
2799 verify_and_dot('post-one-way-partial', graph_edges, graph_nodes,
2800 label=label, properties=verify_properties,
2801 debug=DEBUG, verify=opts.verify,
2802 directed=True,
2803 dot_files=opts.dot_files)
2805 # count the components
2806 return edge_list, components
2809 def sort_replica_by_dsa_guid(rep1, rep2):
2810 """Helper to sort NCReplicas by their DSA guids
2812 The guids need to be sorted in their NDR form.
2814 :param rep1: An NC replica
2815 :param rep2: Another replica
2816 :return: -1, 0, or 1, indicating sort order.
2817 """
2818 return cmp(ndr_pack(rep1.rep_dsa_guid), ndr_pack(rep2.rep_dsa_guid))
2821 def sort_dsa_by_gc_and_guid(dsa1, dsa2):
2822 """Helper to sort DSAs by global catalog status and guid
2824 GC DSAs come before non-GC DSAs, other than that, the guids are
2825 sorted in NDR form.
2827 :param dsa1: A DSA object
2828 :param dsa2: Another DSA
2829 :return: -1, 0, or 1, indicating sort order.
2830 """
2831 if dsa1.is_gc() and not dsa2.is_gc():
2832 return -1
2833 if not dsa1.is_gc() and dsa2.is_gc():
2834 return +1
2835 return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid))
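# Both comparators above are Python 2 cmp()-style functions handed
# straight to list.sort(); e.g. the intrasite code above does
#
#   r_list.sort(sort_replica_by_dsa_guid)
#
# (under Python 3 they would need functools.cmp_to_key, but this
# script targets Python 2).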
2838 def is_smtp_replication_available():
2839 """Can the KCC use SMTP replication?
2841 Currently always returns false because Samba doesn't implement
2842 SMTP transfer for NC changes between DCs.
2844 :return: Boolean (always False)
2845 """
2846 return False
2849 def create_edge(con_type, site_link, guid_to_vertex):
2850 e = MultiEdge()
2851 e.site_link = site_link
2852 e.vertices = []
2853 for site_guid in site_link.site_list:
2854 if str(site_guid) in guid_to_vertex:
2855 e.vertices.extend(guid_to_vertex.get(str(site_guid)))
2856 e.repl_info.cost = site_link.cost
2857 e.repl_info.options = site_link.options
2858 e.repl_info.interval = site_link.interval
2859 e.repl_info.schedule = convert_schedule_to_repltimes(site_link.schedule)
2860 e.con_type = con_type
2861 e.directed = False
2862 return e
2865 def create_auto_edge_set(graph, transport):
2866 e_set = MultiEdgeSet()
2867 # use a NULL guid, not associated with a SiteLinkBridge object
2868 e_set.guid = misc.GUID()
2869 for site_link in graph.edges:
2870 if site_link.con_type == transport:
2871 e_set.edges.append(site_link)
2873 return e_set
2876 def create_edge_set(graph, transport, site_link_bridge):
2877 # TODO not implemented - need to store all site link bridges
2878 e_set = MultiEdgeSet()
2879 # e_set.guid = site_link_bridge
2880 return e_set
2883 def setup_vertices(graph):
2884 for v in graph.vertices:
2885 if v.is_white():
2886 v.repl_info.cost = MAX_DWORD
2887 v.root = None
2888 v.component_id = None
2889 else:
2890 v.repl_info.cost = 0
2891 v.root = v
2892 v.component_id = v
2894 v.repl_info.interval = 0
2895 v.repl_info.options = 0xFFFFFFFF
2896 v.repl_info.schedule = None # TODO highly suspicious
2897 v.demoted = False
2900 def dijkstra(graph, edge_type, include_black):
2901 queue = []
2902 setup_dijkstra(graph, edge_type, include_black, queue)
2903 while len(queue) > 0:
2904 cost, guid, vertex = heapq.heappop(queue)
2905 for edge in vertex.edges:
2906 for v in edge.vertices:
2907 if v is not vertex:
2908 # add new path from vertex to v
2909 try_new_path(graph, queue, vertex, edge, v)
2912 def setup_dijkstra(graph, edge_type, include_black, queue):
2913 setup_vertices(graph)
2914 for vertex in graph.vertices:
2915 if vertex.is_white():
2916 continue
2918 if (((vertex.is_black() and not include_black)
2919 or edge_type not in vertex.accept_black
2920 or edge_type not in vertex.accept_red_red)):
2921 vertex.repl_info.cost = MAX_DWORD
2922 vertex.root = None # NULL GUID
2923 vertex.demoted = True # Demoted appears not to be used
2924 else:
2925 heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
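# Note on the queue entries pushed above (and in try_new_path()
# below): each one is a (cost, guid, vertex) tuple, so heapq pops the
# cheapest vertex first and, presumably by design, breaks cost ties on
# the guid rather than by comparing Vertex objects:
#
#   heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
#   cost, guid, vertex = heapq.heappop(queue)
#
# (both lines are copied from the surrounding code for reference).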
2928 def try_new_path(graph, queue, vfrom, edge, vto):
2929 newRI = ReplInfo()
2930 # What this function checks is that there is a valid time frame for
2931 # which replication can actually occur, despite being adequately
2932 # connected
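# For example (illustrative, assuming combine_repl_info() reports
# whether the two schedules overlap at all): an edge that is only
# available 02:00-03:00 cannot usefully extend a path whose
# accumulated schedule is 14:00-15:00, so even a cheaper cost is
# rejected by the "and not intersect" test below.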
2933 intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI)
2935 # If the new path costs more than the current, then ignore the edge
2936 if newRI.cost > vto.repl_info.cost:
2937 return
2939 if newRI.cost < vto.repl_info.cost and not intersect:
2940 return
2942 new_duration = total_schedule(newRI.schedule)
2943 old_duration = total_schedule(vto.repl_info.schedule)
2945 # Cheaper or longer schedule
2946 if newRI.cost < vto.repl_info.cost or new_duration > old_duration:
2947 vto.root = vfrom.root
2948 vto.component_id = vfrom.component_id
2949 vto.repl_info = newRI
2950 heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
2953 def check_demote_vertex(vertex, edge_type):
2954 if vertex.is_white():
2955 return
2957 # Accepts neither red-red nor black edges, demote
2958 if ((edge_type not in vertex.accept_black and
2959 edge_type not in vertex.accept_red_red)):
2960 vertex.repl_info.cost = MAX_DWORD
2961 vertex.root = None
2962 vertex.demoted = True # Demoted appears not to be used
2965 def undemote_vertex(vertex):
2966 if vertex.is_white():
2967 return
2969 vertex.repl_info.cost = 0
2970 vertex.root = vertex
2971 vertex.demoted = False
2974 def process_edge_set(graph, e_set, internal_edges):
2975 if e_set is None:
2976 for edge in graph.edges:
2977 for vertex in edge.vertices:
2978 check_demote_vertex(vertex, edge.con_type)
2979 process_edge(graph, edge, internal_edges)
2980 for vertex in edge.vertices:
2981 undemote_vertex(vertex)
2982 else:
2983 for edge in e_set.edges:
2984 process_edge(graph, edge, internal_edges)
2987 def process_edge(graph, examine, internal_edges):
2988 # Find the set of all vertices touching the edge to examine
2989 vertices = []
2990 for v in examine.vertices:
2991 # Append a 4-tuple of color, repl cost, guid and vertex
2992 vertices.append((v.color, v.repl_info.cost, v.ndrpacked_guid, v))
2993 # Sort by color, then replication cost, then packed guid (ascending)
2994 DEBUG("vertices is %s" % vertices)
2995 vertices.sort()
2997 color, cost, guid, bestv = vertices[0]
2998 # Add to internal edges an edge from every colored vertex to bestV
2999 for v in examine.vertices:
3000 if v.component_id is None or v.root is None:
3001 continue
3003 # Only add edge if valid inter-tree edge - needs a root and
3004 # different components
3005 if ((bestv.component_id is not None and
3006 bestv.root is not None and
3007 v.component_id is not None and
3008 v.root is not None and
3009 bestv.component_id != v.component_id)):
3010 add_int_edge(graph, internal_edges, examine, bestv, v)
3013 # Add an internal edge; the endpoints are the roots of the vertices
3014 # passed in, and are always colored
3015 def add_int_edge(graph, internal_edges, examine, v1, v2):
3016 root1 = v1.root
3017 root2 = v2.root
3019 red_red = False
3020 if root1.is_red() and root2.is_red():
3021 red_red = True
3023 if red_red:
3024 if ((examine.con_type not in root1.accept_red_red
3025 or examine.con_type not in root2.accept_red_red)):
3026 return
3027 elif (examine.con_type not in root1.accept_black
3028 or examine.con_type not in root2.accept_black):
3029 return
3031 ri = ReplInfo()
3032 ri2 = ReplInfo()
3034 # Create the transitive replInfo for the two trees and this edge
3035 if not combine_repl_info(v1.repl_info, v2.repl_info, ri):
3036 return
3037 # ri is now initialized
3038 if not combine_repl_info(ri, examine.repl_info, ri2):
3039 return
3041 newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type,
3042 examine.site_link)
3043 # Order by vertex guid
3044 #XXX guid comparison using ndr_pack
3045 if newIntEdge.v1.ndrpacked_guid > newIntEdge.v2.ndrpacked_guid:
3046 newIntEdge.v1 = root2
3047 newIntEdge.v2 = root1
3049 internal_edges.add(newIntEdge)
3052 def kruskal(graph, edges):
3053 for v in graph.vertices:
3054 v.edges = []
3056 components = set([x for x in graph.vertices if not x.is_white()])
3057 edges = list(edges)
3059 # Sorted based on internal comparison function of internal edge
3060 edges.sort()
3062 #XXX expected_num_tree_edges is never used
3063 expected_num_tree_edges = 0 # TODO this value makes little sense
3065 count_edges = 0
3066 output_edges = []
3067 index = 0
3068 while index < len(edges): # TODO and num_components > 1
3069 e = edges[index]
3070 parent1 = find_component(e.v1)
3071 parent2 = find_component(e.v2)
3072 if parent1 is not parent2:
3073 count_edges += 1
3074 add_out_edge(graph, output_edges, e)
3075 parent1.component_id = parent2
3076 components.discard(parent1)
3078 index += 1
3080 return output_edges, len(components)
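# find_component() below is the "find" half of a union-find structure:
# kruskal() above unions two trees by pointing parent1.component_id at
# parent2, and find_component() chases component_id links to the root
# while compressing the path. Illustrative sketch (hypothetical
# vertices a, b, c, not real code):
#
#   a.component_id = b; b.component_id = c; c.component_id = c
#   find_component(a) is c   # and a.component_id now points at c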
3083 def find_component(vertex):
3084 if vertex.component_id is vertex:
3085 return vertex
3087 current = vertex
3088 while current.component_id is not current:
3089 current = current.component_id
3091 root = current
3092 current = vertex
3093 while current.component_id is not root:
3094 n = current.component_id
3095 current.component_id = root
3096 current = n
3098 return root
3101 def add_out_edge(graph, output_edges, e):
3102 v1 = e.v1
3103 v2 = e.v2
3105 # This multi-edge is a 'real' edge with no GUID
3106 ee = MultiEdge()
3107 ee.directed = False
3108 ee.site_link = e.site_link
3109 ee.vertices.append(v1)
3110 ee.vertices.append(v2)
3111 ee.con_type = e.e_type
3112 ee.repl_info = e.repl_info
3113 output_edges.append(ee)
3115 v1.edges.append(ee)
3116 v2.edges.append(ee)
3119 def test_all_reps_from(lp, creds, rng_seed=None):
3120 kcc = KCC()
3121 kcc.load_samdb(opts.dburl, lp, creds)
3122 dsas = kcc.list_dsas()
3123 needed_parts = {}
3124 current_parts = {}
3126 guid_to_dnstr = {}
3127 for site in kcc.site_table.values():
3128 guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
3129 for dnstr, dsa in site.dsa_table.items())
3131 dot_edges = []
3132 dot_vertices = []
3133 colours = []
3134 vertex_colours = []
3136 for dsa_dn in dsas:
3137 if rng_seed:
3138 random.seed(rng_seed)
3139 kcc = KCC()
3140 kcc.run(opts.dburl, lp, creds, forced_local_dsa=dsa_dn,
3141 forget_local_links=opts.forget_local_links,
3142 forget_intersite_links=opts.forget_intersite_links)
3144 current, needed = kcc.my_dsa.get_rep_tables()
3146 for dsa in kcc.my_site.dsa_table.values():
3147 if dsa is kcc.my_dsa:
3148 continue
3149 kcc.translate_ntdsconn(dsa)
3150 c, n = dsa.get_rep_tables()
3151 current.update(c)
3152 needed.update(n)
3154 for name, rep_table, rep_parts in (
3155 ('needed', needed, needed_parts),
3156 ('current', current, current_parts)):
3157 for part, nc_rep in rep_table.items():
3158 edges = rep_parts.setdefault(part, [])
3159 for reps_from in nc_rep.rep_repsFrom:
3160 source = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
3161 dest = guid_to_dnstr[str(nc_rep.rep_dsa_guid)]
3162 edges.append((source, dest))
3164 for site in kcc.site_table.values():
3165 for dsa in site.dsa_table.values():
3166 if dsa.is_ro():
3167 vertex_colours.append('#cc0000')
3168 else:
3169 vertex_colours.append('#0000cc')
3170 dot_vertices.append(dsa.dsa_dnstr)
3171 if dsa.connect_table:
3172 DEBUG_FN("DSA %s %s connections:\n%s" %
3173 (dsa.dsa_dnstr, len(dsa.connect_table),
3174 [x.from_dnstr for x in
3175 dsa.connect_table.values()]))
3176 for con in dsa.connect_table.values():
3177 if con.is_rodc_topology():
3178 colours.append('red')
3179 else:
3180 colours.append('blue')
3181 dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))
3183 verify_and_dot('all-dsa-connections', dot_edges, vertices=dot_vertices,
3184 label="all dsa NTDSConnections", properties=(),
3185 debug=DEBUG, verify=opts.verify, dot_files=opts.dot_files,
3186 directed=True, edge_colors=colours,
3187 vertex_colors=vertex_colours)
3189 for name, rep_parts in (('needed', needed_parts),
3190 ('current', current_parts)):
3191 for part, edges in rep_parts.items():
3192 verify_and_dot('all-repsFrom_%s__%s' % (name, part), edges,
3193 directed=True, label=part,
3194 properties=(), debug=DEBUG, verify=opts.verify,
3195 dot_files=opts.dot_files)
3198 logger = logging.getLogger("samba_kcc")
3199 logger.addHandler(logging.StreamHandler(sys.stdout))
3200 DEBUG = logger.debug
3203 def _color_debug(*args, **kwargs):
3204 DEBUG('%s%s%s' % (kwargs['color'], args[0], C_NORMAL), *args[1:])
3206 _globals = globals()
3207 for _color in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW',
3208 'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA',
3209 'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'):
3210 _globals['DEBUG_' + _color] = partial(_color_debug, color=_globals[_color])
3213 def DEBUG_FN(msg=''):
3214 import traceback
3215 filename, lineno, function, text = traceback.extract_stack(None, 2)[0]
3216 DEBUG("%s%s:%s%s %s%s()%s '%s'" % (CYAN, filename, BLUE, lineno,
3217 CYAN, function, C_NORMAL, msg))
3220 ##################################################
3221 # samba_kcc entry point
3222 ##################################################
3224 parser = optparse.OptionParser("samba_kcc [options]")
3225 sambaopts = options.SambaOptions(parser)
3226 credopts = options.CredentialsOptions(parser)
3228 parser.add_option_group(sambaopts)
3229 parser.add_option_group(credopts)
3230 parser.add_option_group(options.VersionOptions(parser))
3232 parser.add_option("--readonly", default=False,
3233 help="compute topology but do not update database",
3234 action="store_true")
3236 parser.add_option("--debug",
3237 help="debug output",
3238 action="store_true")
3240 parser.add_option("--verify",
3241 help="verify that assorted invariants are kept",
3242 action="store_true")
3244 parser.add_option("--list-verify-tests",
3245 help=("list what verification actions are available "
3246 "and do nothing else"),
3247 action="store_true")
3249 parser.add_option("--no-dot-files", dest='dot_files',
3250 help="Don't write dot graph files in /tmp",
3251 default=True, action="store_false")
3253 parser.add_option("--seed",
3254 help="random number seed",
3255 type=int)
3257 parser.add_option("--importldif",
3258 help="import topology ldif file",
3259 type=str, metavar="<file>")
3261 parser.add_option("--exportldif",
3262 help="export topology ldif file",
3263 type=str, metavar="<file>")
3265 parser.add_option("-H", "--URL",
3266 help="LDB URL for database or target server",
3267 type=str, metavar="<URL>", dest="dburl")
3269 parser.add_option("--tmpdb",
3270 help="schemaless database file to create for ldif import",
3271 type=str, metavar="<file>")
3273 parser.add_option("--now",
3274 help=("assume current time is this ('YYYYmmddHHMMSS[tz]',"
3275 " default: system time)"),
3276 type=str, metavar="<date>")
3278 parser.add_option("--forced-local-dsa",
3279 help="run calculations assuming the DSA is this DN",
3280 type=str, metavar="<DSA>")
3282 parser.add_option("--attempt-live-connections", default=False,
3283 help="Attempt to connect to other DSAs to test links",
3284 action="store_true")
3286 parser.add_option("--list-valid-dsas", default=False,
3287 help=("Print a list of DSA dnstrs that could be"
3288 " used in --forced-local-dsa"),
3289 action="store_true")
3291 parser.add_option("--test-all-reps-from", default=False,
3292 help="Create and verify a graph of reps-from for every DSA",
3293 action="store_true")
3295 parser.add_option("--forget-local-links", default=False,
3296 help="pretend not to know the existing local topology",
3297 action="store_true")
3299 parser.add_option("--forget-intersite-links", default=False,
3300 help="pretend not to know the existing intersite topology",
3301 action="store_true")
3304 opts, args = parser.parse_args()
3307 if opts.list_verify_tests:
3308 list_verify_tests()
3309 sys.exit(0)
3311 if opts.debug:
3312 logger.setLevel(logging.DEBUG)
3313 elif opts.readonly:
3314 logger.setLevel(logging.INFO)
3315 else:
3316 logger.setLevel(logging.WARNING)
3318 # initialize seed from optional input parameter
3319 if opts.seed:
3320 random.seed(opts.seed)
3321 else:
3322 random.seed(0xACE5CA11)
3324 if opts.now:
3325 for timeformat in ("%Y%m%d%H%M%S%Z", "%Y%m%d%H%M%S"):
3326 try:
3327 now_tuple = time.strptime(opts.now, timeformat)
3328 break
3329 except ValueError:
3330 pass
3331 else:
3332 # this else runs only if the loop finished without break --> no format matched
3333 print >> sys.stderr, "could not parse time '%s'" % opts.now
3334 sys.exit(1)
3336 unix_now = int(time.mktime(now_tuple))
3337 else:
3338 unix_now = int(time.time())
3340 nt_now = unix2nttime(unix_now)
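# For example (illustrative invocation; the timestamp is arbitrary):
#
#   samba_kcc --readonly --now 20150820123000
#
# matches the second format above and pins unix_now (and hence nt_now)
# for the whole run.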
3342 lp = sambaopts.get_loadparm()
3343 creds = credopts.get_credentials(lp, fallback_machine=True)
3345 if opts.dburl is None:
3346 opts.dburl = lp.samdb_url()
3348 if opts.test_all_reps_from:
3349 opts.readonly = True
3350 rng_seed = opts.seed or 0xACE5CA11
3351 test_all_reps_from(lp, creds, rng_seed=rng_seed)
3352 sys.exit()
3354 # Instantiate Knowledge Consistency Checker and perform run
3355 kcc = KCC()
3357 if opts.exportldif:
3358 rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)
3359 sys.exit(rc)
3361 if opts.importldif:
3362 if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
3363 logger.error("Specify a target temp database file with --tmpdb option")
3364 sys.exit(1)
3366 rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)
3367 if rc != 0:
3368 sys.exit(rc)
3370 if opts.list_valid_dsas:
3371 kcc.load_samdb(opts.dburl, lp, creds)
3372 print '\n'.join(kcc.list_dsas())
3373 sys.exit()
3375 try:
3376 rc = kcc.run(opts.dburl, lp, creds, opts.forced_local_dsa,
3377 opts.forget_local_links, opts.forget_intersite_links)
3378 sys.exit(rc)
3380 except GraphError, e:
3381 print e
3382 sys.exit(1)