1 #!/usr/bin/env python
3 # Compute our KCC topology
5 # Copyright (C) Dave Craft 2011
6 # Copyright (C) Andrew Bartlett 2015
8 # Andrew Bartlett's alleged work performed by his underlings Douglas
9 # Bagnall and Garming Sam.
11 # This program is free software; you can redistribute it and/or modify
12 # it under the terms of the GNU General Public License as published by
13 # the Free Software Foundation; either version 3 of the License, or
14 # (at your option) any later version.
16 # This program is distributed in the hope that it will be useful,
17 # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 # GNU General Public License for more details.
21 # You should have received a copy of the GNU General Public License
22 # along with this program. If not, see <http://www.gnu.org/licenses/>.
24 import os
25 import sys
26 import random
27 import uuid
29 # ensure we get messages out immediately, so they get in the samba logs,
30 # and don't get swallowed by a timeout
31 os.environ['PYTHONUNBUFFERED'] = '1'
33 # forcing GMT avoids a problem in some timezones with kerberos. Both MIT
34 # and heimdal can get mutual authentication errors due to the 24 second
35 # difference between UTC and GMT when using some zone files (eg. the PDT
36 # zone from the US)
37 os.environ["TZ"] = "GMT"
39 # Find right directory when running from source tree
40 sys.path.insert(0, "bin/python")
42 import optparse
43 import logging
44 import itertools
45 import heapq
46 import time
47 from functools import partial
49 from samba import (
50 getopt as options,
51 ldb,
52 dsdb,
53 drs_utils,
54 nttime2unix)
55 from samba.auth import system_session
56 from samba.samdb import SamDB
57 from samba.dcerpc import drsuapi
58 from samba.kcc_utils import *
59 from samba.graph_utils import *
60 from samba import ldif_utils
63 class KCC(object):
64 """The Knowledge Consistency Checker class.
66 A container for objects and methods allowing a run of the KCC. Produces a
67 set of connections in the samdb that the Distributed Replication
68 Service can then use to replicate naming contexts.
70 :param unix_now: The putative current time in seconds since 1970.
71 :param read_only: Don't write to the database.
72 :param verify: Check topological invariants for the generated graphs
73 :param debug: Write verbosely to stderr.
74 "param dot_files: write Graphviz files in /tmp showing topology
75 """
76 def __init__(self):
77 """Initializes the partitions class which can hold
78 our local DCs partitions or all the partitions in
79 the forest
80 """
81 self.part_table = {} # partition objects
82 self.site_table = {}
83 self.transport_table = {}
84 self.ip_transport = None
85 self.sitelink_table = {}
86 self.dsa_by_dnstr = {}
87 self.dsa_by_guid = {}
89 self.get_dsa_by_guidstr = self.dsa_by_guid.get
90 self.get_dsa = self.dsa_by_dnstr.get
92 # TODO: These should be backed by a 'permanent' store so that when
93 # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
94 # the failure information can be returned
95 self.kcc_failed_links = {}
96 self.kcc_failed_connections = set()
98 # Used in inter-site topology computation. A list
99 # of connections (by NTDSConnection object) that are
100 # to be kept when pruning un-needed NTDS Connections
101 self.kept_connections = set()
103 self.my_dsa_dnstr = None # My dsa DN
104 self.my_dsa = None # My dsa object
106 self.my_site_dnstr = None
107 self.my_site = None
109 self.samdb = None
111 def load_all_transports(self):
112 """Loads the inter-site transport objects for Sites
114 :return: None
115 :raise KCCError: if no IP transport is found
116 """
117 try:
118 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
119 self.samdb.get_config_basedn(),
120 scope=ldb.SCOPE_SUBTREE,
121 expression="(objectClass=interSiteTransport)")
122 except ldb.LdbError, (enum, estr):
123 raise KCCError("Unable to find inter-site transports - (%s)" %
124 estr)
126 for msg in res:
127 dnstr = str(msg.dn)
129 transport = Transport(dnstr)
131 transport.load_transport(self.samdb)
132 self.transport_table.setdefault(str(transport.guid),
133 transport)
134 if transport.name == 'IP':
135 self.ip_transport = transport
137 if self.ip_transport is None:
138 raise KCCError("there doesn't seem to be an IP transport")
140 def load_all_sitelinks(self):
141 """Loads the inter-site siteLink objects
143 :return: None
144 :raise KCCError: if site-links aren't found
145 """
146 try:
147 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
148 self.samdb.get_config_basedn(),
149 scope=ldb.SCOPE_SUBTREE,
150 expression="(objectClass=siteLink)")
151 except ldb.LdbError, (enum, estr):
152 raise KCCError("Unable to find inter-site siteLinks - (%s)" % estr)
154 for msg in res:
155 dnstr = str(msg.dn)
157 # already loaded
158 if dnstr in self.sitelink_table:
159 continue
161 sitelink = SiteLink(dnstr)
163 sitelink.load_sitelink(self.samdb)
165 # Assign this siteLink to table
166 # and index by dn
167 self.sitelink_table[dnstr] = sitelink
169 def load_site(self, dn_str):
170 """Helper for load_my_site and load_all_sites.
172 Put all the site's DSAs into the KCC indices.
174 :param dn_str: a site dn_str
175 :return: the Site object pertaining to the dn_str
176 """
177 site = Site(dn_str, unix_now)
178 site.load_site(self.samdb)
180 # We avoid replacing the site with an identical copy in case
181 # somewhere else has a reference to the old one, which would
182 # lead to all manner of confusion and chaos.
183 guid = str(site.site_guid)
184 if guid not in self.site_table:
185 self.site_table[guid] = site
186 self.dsa_by_dnstr.update(site.dsa_table)
187 self.dsa_by_guid.update((str(x.dsa_guid), x)
188 for x in site.dsa_table.values())
190 return self.site_table[guid]
192 def load_my_site(self):
193 """Load the Site object for the local DSA.
195 :return: None
196 """
197 self.my_site_dnstr = ("CN=%s,CN=Sites,%s" % (
198 self.samdb.server_site_name(),
199 self.samdb.get_config_basedn()))
201 self.my_site = self.load_site(self.my_site_dnstr)
203 def load_all_sites(self):
204 """Discover all sites and create Site objects.
206 :return: None
207 :raise: KCCError if sites can't be found
208 """
209 try:
210 res = self.samdb.search("CN=Sites,%s" %
211 self.samdb.get_config_basedn(),
212 scope=ldb.SCOPE_SUBTREE,
213 expression="(objectClass=site)")
214 except ldb.LdbError, (enum, estr):
215 raise KCCError("Unable to find sites - (%s)" % estr)
217 for msg in res:
218 sitestr = str(msg.dn)
219 self.load_site(sitestr)
221 def load_my_dsa(self):
222 """Discover my nTDSDSA dn thru the rootDSE entry
224 :return: None
225 :raise: KCCError if DSA can't be found
226 """
227 dn = ldb.Dn(self.samdb, "<GUID=%s>" % self.samdb.get_ntds_GUID())
228 try:
229 res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
230 attrs=["objectGUID"])
231 except ldb.LdbError, (enum, estr):
232 logger.warning("Search for %s failed: %s. This typically happens"
233 " in --importldif mode due to lack of module"
234 " support.", dn, estr)
235 try:
236 # We work around the failure above by looking at the
237 # dsServiceName that was put in the fake rootdse by
238 # the --exportldif, rather than the
239 # samdb.get_ntds_GUID(). The disadvantage is that this
240 # mode requires we modify the @ROOTDSE dn to support
241 # --forced-local-dsa
242 service_name_res = self.samdb.search(base="",
243 scope=ldb.SCOPE_BASE,
244 attrs=["dsServiceName"])
245 dn = ldb.Dn(self.samdb,
246 service_name_res[0]["dsServiceName"][0])
248 res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
249 attrs=["objectGUID"])
250 except ldb.LdbError, (enum, estr):
251 raise KCCError("Unable to find my nTDSDSA - (%s)" % estr)
253 if len(res) != 1:
254 raise KCCError("Unable to find my nTDSDSA at %s" %
255 dn.extended_str())
257 ntds_guid = misc.GUID(self.samdb.get_ntds_GUID())
258 if misc.GUID(res[0]["objectGUID"][0]) != ntds_guid:
259 raise KCCError("Did not find the GUID we expected,"
260 " perhaps due to --importldif")
262 self.my_dsa_dnstr = str(res[0].dn)
264 self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)
266 if self.my_dsa_dnstr not in self.dsa_by_dnstr:
267 DEBUG_DARK_YELLOW("my_dsa %s isn't in self.dsa_by_dnstr:"
268 " it must be an RODC.\n"
269 "Let's add it, because my_dsa is special!\n"
270 "(likewise for self.dsa_by_guid of course)" %
271 self.my_dsa_dnstr)
273 self.dsa_by_dnstr[self.my_dsa_dnstr] = self.my_dsa
274 self.dsa_by_guid[str(self.my_dsa.dsa_guid)] = self.my_dsa
276 def load_all_partitions(self):
277 """Discover all NCs thru the Partitions dn and
278 instantiate and load the NCs.
280 Each NC is inserted into the part_table by partition
281 dn string (not the nCName dn string)
283 :raise KCCError: on error
284 """
285 try:
286 res = self.samdb.search("CN=Partitions,%s" %
287 self.samdb.get_config_basedn(),
288 scope=ldb.SCOPE_SUBTREE,
289 expression="(objectClass=crossRef)")
290 except ldb.LdbError, (enum, estr):
291 raise KCCError("Unable to find partitions - (%s)" % estr)
293 for msg in res:
294 partstr = str(msg.dn)
296 # already loaded
297 if partstr in self.part_table:
298 continue
300 part = Partition(partstr)
302 part.load_partition(self.samdb)
303 self.part_table[partstr] = part
305 def should_be_present_test(self):
306 """Enumerate all loaded partitions and DSAs in local
307 site and test if NC should be present as replica
308 """
309 for partdn, part in self.part_table.items():
310 for dsadn, dsa in self.my_site.dsa_table.items():
311 needed, ro, partial = part.should_be_present(dsa)
312 logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
313 (dsadn, part.nc_dnstr, needed, ro, partial))
315 def refresh_failed_links_connections(self):
316 """Based on MS-ADTS 6.2.2.1"""
318 # Instead of NULL link with failure_count = 0, the tuple is
319 # simply removed
321 # LINKS: Refresh failed links
322 self.kcc_failed_links = {}
323 current, needed = self.my_dsa.get_rep_tables()
324 for replica in current.values():
325 # For every possible connection to replicate
326 for reps_from in replica.rep_repsFrom:
327 failure_count = reps_from.consecutive_sync_failures
328 if failure_count <= 0:
329 continue
331 dsa_guid = str(reps_from.source_dsa_obj_guid)
332 time_first_failure = reps_from.last_success
333 last_result = reps_from.last_attempt
334 dns_name = reps_from.dns_name1
336 f = self.kcc_failed_links.get(dsa_guid)
337 if not f:
338 f = KCCFailedObject(dsa_guid, failure_count,
339 time_first_failure, last_result,
340 dns_name)
341 self.kcc_failed_links[dsa_guid] = f
342 #elif f.failure_count == 0:
343 # f.failure_count = failure_count
344 # f.time_first_failure = time_first_failure
345 # f.last_result = last_result
346 else:
347 f.failure_count = max(f.failure_count, failure_count)
348 f.time_first_failure = min(f.time_first_failure,
349 time_first_failure)
350 f.last_result = last_result
352 # CONNECTIONS: Refresh failed connections
353 restore_connections = set()
354 if opts.attempt_live_connections:
355 DEBUG("refresh_failed_links: checking if links are still down")
356 for connection in self.kcc_failed_connections:
357 try:
358 drs_utils.drsuapi_connect(connection.dns_name, lp, creds)
359 # Failed connection is no longer failing
360 restore_connections.add(connection)
361 except drs_utils.drsException:
362 # Failed connection still failing
363 connection.failure_count += 1
364 else:
365 DEBUG("refresh_failed_links: not checking live links because we\n"
366 "weren't asked to --attempt-live-connections")
368 # Remove the restored connections from the failed connections
369 self.kcc_failed_connections.difference_update(restore_connections)
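# Illustrative sketch (not part of the original script): the merge rule used
# above when several repsFrom tuples report failures against the same source
# DSA -- keep the highest consecutive failure count and the earliest
# first-failure time. Plain tuples stand in for the real KCCFailedObject
# from samba.kcc_utils.
def _merge_failure_info_example(existing, failure_count, time_first_failure,
                                last_result):
    """Return a merged (count, first_failure, result) tuple, or the new
    values unchanged when there is no existing entry."""
    if existing is None:
        return (failure_count, time_first_failure, last_result)
    old_count, old_first, _old_result = existing
    return (max(old_count, failure_count),
            min(old_first, time_first_failure),
            last_result)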
371 def is_stale_link_connection(self, target_dsa):
372 """Returns False if no tuple z exists in the kCCFailedLinks or
373 kCCFailedConnections variables such that z.UUIDDsa is the
374 objectGUID of the target dsa, z.FailureCount > 0, and
375 the current time - z.TimeFirstFailure > 2 hours.
376 """
377 # Returns True if tuple z exists...
378 failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
379 if failed_link:
380 # failure_count should be > 0, but check anyways
381 if failed_link.failure_count > 0:
382 unix_first_failure = \
383 nttime2unix(failed_link.time_first_failure)
384 # TODO guard against future
385 if unix_first_failure > unix_now:
386 logger.error("The last success time attribute for \
387 repsFrom is in the future!")
389 # Perform calculation in seconds
390 if (unix_now - unix_first_failure) > 60 * 60 * 2:
391 return True
393 # TODO connections
395 return False
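# Illustrative sketch (not part of the original script): the stale-link test
# described above, reduced to plain numbers. A link is considered stale when
# it has at least one recorded failure and the first failure happened more
# than two hours before 'now' (all times in Unix seconds).
def _is_stale_link_example(failure_count, unix_first_failure, unix_now):
    if failure_count <= 0:
        return False
    if unix_first_failure > unix_now:
        # the recorded first failure claims to be in the future;
        # treat the link as not stale, as the code above effectively does
        return False
    return (unix_now - unix_first_failure) > 60 * 60 * 2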
397 # TODO: This should be backed by some form of local database
398 def remove_unneeded_failed_links_connections(self):
399 # Remove all tuples in kcc_failed_links where failure count = 0
400 # In this implementation, this should never happen.
402 # Remove all connections which were not used this run or connections
403 # that became active during this run.
404 pass
406 def remove_unneeded_ntdsconn(self, all_connected):
407 """Remove unneeded NTDS Connections once topology is calculated
409 Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections
411 :param all_connected: indicates whether all sites are connected
412 :return: None
413 """
414 mydsa = self.my_dsa
416 # New connections won't have GUIDs which are needed for
417 # sorting. Add them.
418 for cn_conn in mydsa.connect_table.values():
419 if cn_conn.guid is None:
420 if opts.readonly:
421 cn_conn.guid = misc.GUID(str(uuid.uuid4()))
422 cn_conn.whenCreated = nt_now
423 else:
424 cn_conn.load_connection(self.samdb)
426 for cn_conn in mydsa.connect_table.values():
428 s_dnstr = cn_conn.get_from_dnstr()
429 if s_dnstr is None:
430 cn_conn.to_be_deleted = True
431 continue
433 # Get the source DSA no matter what site
434 # XXX s_dsa is NEVER USED. It will be removed.
435 s_dsa = self.get_dsa(s_dnstr)
437 #XXX should an RODC be regarded as same site
438 same_site = s_dnstr in self.my_site.dsa_table
440 # Given an nTDSConnection object cn, if the DC with the
441 # nTDSDSA object dc that is the parent object of cn and
442 # the DC with the nTDSDA object referenced by cn!fromServer
443 # are in the same site, the KCC on dc deletes cn if all of
444 # the following are true:
446 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
448 # No site settings object s exists for the local DC's site, or
449 # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
450 # s!options.
452 # Another nTDSConnection object cn2 exists such that cn and
453 # cn2 have the same parent object, cn!fromServer = cn2!fromServer,
454 # and either
456 # cn!whenCreated < cn2!whenCreated
458 # cn!whenCreated = cn2!whenCreated and
459 # cn!objectGUID < cn2!objectGUID
461 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
462 if same_site:
463 if not cn_conn.is_generated():
464 continue
466 if self.my_site.is_cleanup_ntdsconn_disabled():
467 continue
469 # Loop thru connections looking for a duplicate that
470 # fulfills the previous criteria
471 lesser = False
472 packed_guid = ndr_pack(cn_conn.guid)
473 for cn2_conn in mydsa.connect_table.values():
474 if cn2_conn is cn_conn:
475 continue
477 s2_dnstr = cn2_conn.get_from_dnstr()
479 # If the NTDS Connections has a different
480 # fromServer field then no match
481 if s2_dnstr != s_dnstr:
482 continue
484 #XXX GUID comparison
485 lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or
486 (cn_conn.whenCreated == cn2_conn.whenCreated and
487 packed_guid < ndr_pack(cn2_conn.guid)))
489 if lesser:
490 break
492 if lesser and not cn_conn.is_rodc_topology():
493 cn_conn.to_be_deleted = True
495 # Given an nTDSConnection object cn, if the DC with the nTDSDSA
496 # object dc that is the parent object of cn and the DC with
497 # the nTDSDSA object referenced by cn!fromServer are in
498 # different sites, a KCC acting as an ISTG in dc's site
499 # deletes cn if all of the following are true:
501 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
503 # cn!fromServer references an nTDSDSA object for a DC
504 # in a site other than the local DC's site.
506 # The keepConnections sequence returned by
507 # CreateIntersiteConnections() does not contain
508 # cn!objectGUID, or cn is "superseded by" (see below)
509 # another nTDSConnection cn2 and keepConnections
510 # contains cn2!objectGUID.
512 # The return value of CreateIntersiteConnections()
513 # was true.
515 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
516 # cn!options
518 else: # different site
520 if not mydsa.is_istg():
521 continue
523 if not cn_conn.is_generated():
524 continue
526 # TODO
527 # We are directly using this connection in intersite or
528 # we are using a connection which can supersede this one.
530 # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
531 # appear to be correct.
533 # 1. cn!fromServer and cn!parent appear inconsistent with
534 # no cn2
535 # 2. The repsFrom do not imply each other
537 if cn_conn in self.kept_connections: # and not_superceded:
538 continue
540 # This is the result of create_intersite_connections
541 if not all_connected:
542 continue
544 if not cn_conn.is_rodc_topology():
545 cn_conn.to_be_deleted = True
547 if mydsa.is_ro() or opts.readonly:
548 for connect in mydsa.connect_table.values():
549 if connect.to_be_deleted:
550 DEBUG_FN("TO BE DELETED:\n%s" % connect)
551 if connect.to_be_added:
552 DEBUG_FN("TO BE ADDED:\n%s" % connect)
554 # Perform deletion from our tables but perform
555 # no database modification
556 mydsa.commit_connections(self.samdb, ro=True)
557 else:
558 # Commit any modified connections
559 mydsa.commit_connections(self.samdb)
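# Illustrative sketch (not part of the original script): the duplicate
# nTDSConnection tie-break used above. Of two connections with the same
# fromServer, the one created earlier (or, on a tie, with the lower
# NDR-packed objectGUID) is the "lesser" one and is the one deleted, so the
# most recently created duplicate survives. Raw values stand in for the real
# NTDSConnection attributes and ndr_pack() output.
def _loses_tiebreak_example(when_created, packed_guid,
                            other_when_created, other_packed_guid):
    """True if the first connection is 'lesser' and should be deleted."""
    return (when_created < other_when_created or
            (when_created == other_when_created and
             packed_guid < other_packed_guid))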
561 def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
562 """Part of MS-ADTS 6.2.2.5.
564 Update t_repsFrom if necessary to satisfy requirements. Such
565 updates are typically required when the IDL_DRSGetNCChanges
566 server has moved from one site to another--for example, to
567 enable compression when the server is moved from the
568 client's site to another site.
570 :param n_rep: NC replica we need
571 :param t_repsFrom: repsFrom tuple to modify
572 :param s_rep: NC replica at source DSA
573 :param s_dsa: source DSA
574 :param cn_conn: Local DSA NTDSConnection child
576 ::returns: (update) bit field containing which portion of the
577 repsFrom was modified. This bit field is suitable as input
578 to IDL_DRSReplicaModify ulModifyFields element, as it consists
579 of these bits:
580 drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
581 drsuapi.DRSUAPI_DRS_UPDATE_FLAGS
582 drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS
583 """
584 s_dnstr = s_dsa.dsa_dnstr
585 update = 0x0
587 same_site = s_dnstr in self.my_site.dsa_table
589 # if schedule doesn't match then update and modify
590 times = convert_schedule_to_repltimes(cn_conn.schedule)
591 if times != t_repsFrom.schedule:
592 t_repsFrom.schedule = times
593 update |= drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
595 # Bit DRS_PER_SYNC is set in replicaFlags if and only
596 # if nTDSConnection schedule has a value v that specifies
597 # scheduled replication is to be performed at least once
598 # per week.
599 if cn_conn.is_schedule_minimum_once_per_week():
601 if ((t_repsFrom.replica_flags &
602 drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0):
603 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC
605 # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
606 # if the source DSA and the local DC's nTDSDSA object are
607 # in the same site or source dsa is the FSMO role owner
608 # of one or more FSMO roles in the NC replica.
609 if same_site or n_rep.is_fsmo_role_owner(s_dnstr):
611 if ((t_repsFrom.replica_flags &
612 drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0):
613 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC
615 # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
616 # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
617 # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
618 # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
619 # t.replicaFlags if and only if s and the local DC's
620 # nTDSDSA object are in different sites.
621 if ((cn_conn.options &
622 dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0):
624 if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:
625 # XXX WARNING
627 # it LOOKS as if this next test is a bit silly: it
628 # checks the flag then sets it if it not set; the same
629 # effect could be achieved by unconditionally setting
630 # it. But in fact the repsFrom object has special
631 # magic attached to it, and altering replica_flags has
632 # side-effects. That is bad in my opinion, but there
633 # you go.
634 if ((t_repsFrom.replica_flags &
635 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
636 t_repsFrom.replica_flags |= \
637 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
639 elif not same_site:
641 if ((t_repsFrom.replica_flags &
642 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
643 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
645 # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
646 # and only if s and the local DC's nTDSDSA object are
647 # not in the same site and the
648 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
649 # clear in cn!options
650 if (not same_site and
651 (cn_conn.options &
652 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):
654 if ((t_repsFrom.replica_flags &
655 drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0):
656 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION
658 # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
659 # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
660 if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:
662 if ((t_repsFrom.replica_flags &
663 drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0):
664 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC
666 # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
667 # set in t.replicaFlags if and only if cn!enabledConnection = false.
668 if not cn_conn.is_enabled():
670 if ((t_repsFrom.replica_flags &
671 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0):
672 t_repsFrom.replica_flags |= \
673 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC
675 if ((t_repsFrom.replica_flags &
676 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0):
677 t_repsFrom.replica_flags |= \
678 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC
680 # If s and the local DC's nTDSDSA object are in the same site,
681 # cn!transportType has no value, or the RDN of cn!transportType
682 # is CN=IP:
684 # Bit DRS_MAIL_REP in t.replicaFlags is clear.
686 # t.uuidTransport = NULL GUID.
688 # t.uuidDsa = The GUID-based DNS name of s.
690 # Otherwise:
692 # Bit DRS_MAIL_REP in t.replicaFlags is set.
694 # If x is the object with dsname cn!transportType,
695 # t.uuidTransport = x!objectGUID.
697 # Let a be the attribute identified by
698 # x!transportAddressAttribute. If a is
699 # the dNSHostName attribute, t.uuidDsa = the GUID-based
700 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
702 # It appears that the first statement i.e.
704 # "If s and the local DC's nTDSDSA object are in the same
705 # site, cn!transportType has no value, or the RDN of
706 # cn!transportType is CN=IP:"
708 # could be a slightly tighter statement if it had an "or"
709 # between each condition. I believe this should
710 # be interpreted as:
712 # IF (same-site) OR (no-value) OR (type-ip)
714 # because IP should be the primary transport mechanism
715 # (even in inter-site) and the absence of the transportType
716 # attribute should always imply IP no matter if it's multi-site
718 # NOTE MS-TECH INCORRECT:
720 # All indications point to these statements above being
721 # incorrectly stated:
723 # t.uuidDsa = The GUID-based DNS name of s.
725 # Let a be the attribute identified by
726 # x!transportAddressAttribute. If a is
727 # the dNSHostName attribute, t.uuidDsa = the GUID-based
728 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
730 # because the uuidDsa is a GUID and not a GUID-based DNS
731 # name. Nor can uuidDsa hold (s!parent)!a if not
732 # dNSHostName. What should have been said is:
734 # t.naDsa = The GUID-based DNS name of s
736 # That would also be correct if transportAddressAttribute
737 # were "mailAddress" because (naDsa) can also correctly
738 # hold the SMTP ISM service address.
740 nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())
742 # We're not currently supporting SMTP replication
743 # so is_smtp_replication_available() is currently
744 # always returning False
745 if ((same_site or
746 cn_conn.transport_dnstr is None or
747 cn_conn.transport_dnstr.find("CN=IP") == 0 or
748 not is_smtp_replication_available())):
750 if ((t_repsFrom.replica_flags &
751 drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0):
752 t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP
754 t_repsFrom.transport_guid = misc.GUID()
756 # See (NOTE MS-TECH INCORRECT) above
757 if t_repsFrom.version == 0x1:
758 if t_repsFrom.dns_name1 is None or \
759 t_repsFrom.dns_name1 != nastr:
760 t_repsFrom.dns_name1 = nastr
761 else:
762 if t_repsFrom.dns_name1 is None or \
763 t_repsFrom.dns_name2 is None or \
764 t_repsFrom.dns_name1 != nastr or \
765 t_repsFrom.dns_name2 != nastr:
766 t_repsFrom.dns_name1 = nastr
767 t_repsFrom.dns_name2 = nastr
769 else:
770 # XXX This entire branch is NEVER used! Because we don't do SMTP!
771 # (see the if condition above). Just close your eyes here.
772 if ((t_repsFrom.replica_flags &
773 drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0):
774 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP
776 # We have a transport type but it's not an
777 # object in the database
778 if cn_conn.transport_guid not in self.transport_table:
779 raise KCCError("Missing inter-site transport - (%s)" %
780 cn_conn.transport_dnstr)
782 x_transport = self.transport_table[str(cn_conn.transport_guid)]
784 if t_repsFrom.transport_guid != x_transport.guid:
785 t_repsFrom.transport_guid = x_transport.guid
787 # See (NOTE MS-TECH INCORRECT) above
788 if x_transport.address_attr == "dNSHostName":
790 if t_repsFrom.version == 0x1:
791 if t_repsFrom.dns_name1 is None or \
792 t_repsFrom.dns_name1 != nastr:
793 t_repsFrom.dns_name1 = nastr
794 else:
795 if t_repsFrom.dns_name1 is None or \
796 t_repsFrom.dns_name2 is None or \
797 t_repsFrom.dns_name1 != nastr or \
798 t_repsFrom.dns_name2 != nastr:
799 t_repsFrom.dns_name1 = nastr
800 t_repsFrom.dns_name2 = nastr
802 else:
803 # MS tech specification says we retrieve the named
804 # attribute in "transportAddressAttribute" from the parent of
805 # the DSA object
806 try:
807 pdnstr = s_dsa.get_parent_dnstr()
808 attrs = [x_transport.address_attr]
810 res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
811 attrs=attrs)
812 except ldb.LdbError, (enum, estr):
813 raise KCCError(
814 "Unable to find attr (%s) for (%s) - (%s)" %
815 (x_transport.address_attr, pdnstr, estr))
817 msg = res[0]
818 nastr = str(msg[x_transport.address_attr][0])
820 # See (NOTE MS-TECH INCORRECT) above
821 if t_repsFrom.version == 0x1:
822 if t_repsFrom.dns_name1 is None or \
823 t_repsFrom.dns_name1 != nastr:
824 t_repsFrom.dns_name1 = nastr
825 else:
826 if t_repsFrom.dns_name1 is None or \
827 t_repsFrom.dns_name2 is None or \
828 t_repsFrom.dns_name1 != nastr or \
829 t_repsFrom.dns_name2 != nastr:
831 t_repsFrom.dns_name1 = nastr
832 t_repsFrom.dns_name2 = nastr
834 if t_repsFrom.is_modified():
835 logger.debug("modify_repsFrom(): %s" % t_repsFrom)
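# Illustrative sketch (not part of the original script): the pattern used
# throughout modify_repsFrom() above -- OR a replica flag in only when it is
# not already set, so that the magic repsFrom object only records a change
# when the flag actually flips. The flag values here are placeholders, not
# the real drsuapi constants.
_EXAMPLE_FLAG_PER_SYNC = 0x0001
_EXAMPLE_FLAG_INIT_SYNC = 0x0002

def _set_flag_if_needed_example(replica_flags, flag):
    """Return (new_flags, changed), leaving already-set flags untouched."""
    if replica_flags & flag:
        return replica_flags, False
    return replica_flags | flag, True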
837 def is_repsFrom_implied(self, n_rep, cn_conn):
838 """Given a NC replica and NTDS Connection, determine if the connection
839 implies a repsFrom tuple should be present from the source DSA listed
840 in the connection to the naming context
842 :param n_rep: NC replica
843 :param cn_conn: NTDS Connection
844 ::returns: (True or False, source DSA)
845 """
846 #XXX different conditions for "implies" than MS-ADTS 6.2.2
848 # NTDS Connection must satisfy all the following criteria
849 # to imply a repsFrom tuple is needed:
851 # cn!enabledConnection = true.
852 # cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
853 # cn!fromServer references an nTDSDSA object.
855 s_dsa = None
857 if cn_conn.is_enabled() and not cn_conn.is_rodc_topology():
858 s_dnstr = cn_conn.get_from_dnstr()
859 if s_dnstr is not None:
860 s_dsa = self.get_dsa(s_dnstr)
862 # No DSA matching this source DN string?
863 if s_dsa is None:
864 return False, None
866 # To imply a repsFrom tuple is needed, each of these
867 # must be True:
869 # An NC replica of the NC "is present" on the DC to
870 # which the nTDSDSA object referenced by cn!fromServer
871 # corresponds.
873 # An NC replica of the NC "should be present" on
874 # the local DC
875 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
877 if s_rep is None or not s_rep.is_present():
878 return False, None
880 # To imply a repsFrom tuple is needed, each of these
881 # must be True:
883 # The NC replica on the DC referenced by cn!fromServer is
884 # a writable replica or the NC replica that "should be
885 # present" on the local DC is a partial replica.
887 # The NC is not a domain NC, the NC replica that
888 # "should be present" on the local DC is a partial
889 # replica, cn!transportType has no value, or
890 # cn!transportType has an RDN of CN=IP.
892 implied = (not s_rep.is_ro() or n_rep.is_partial()) and \
893 (not n_rep.is_domain() or
894 n_rep.is_partial() or
895 cn_conn.transport_dnstr is None or
896 cn_conn.transport_dnstr.find("CN=IP") == 0)
898 if implied:
899 return True, s_dsa
900 else:
901 return False, None
903 def translate_ntdsconn(self, current_dsa=None):
904 """This function adjusts values of repsFrom abstract attributes of NC
905 replicas on the local DC to match those implied by
906 nTDSConnection objects.
907 [MS-ADTS] 6.2.2.5
908 """
909 count = 0
911 if current_dsa is None:
912 current_dsa = self.my_dsa
914 if current_dsa.is_translate_ntdsconn_disabled():
915 logger.debug("skipping translate_ntdsconn() "
916 "because disabling flag is set")
917 return
919 logger.debug("translate_ntdsconn(): enter")
921 current_rep_table, needed_rep_table = current_dsa.get_rep_tables()
923 # Filled in with replicas we currently have that need deleting
924 delete_reps = set()
926 # We're using the MS notation names here to allow
927 # correlation back to the published algorithm.
929 # n_rep - NC replica (n)
930 # t_repsFrom - tuple (t) in n!repsFrom
931 # s_dsa - Source DSA of the replica. Defined as nTDSDSA
932 # object (s) such that (s!objectGUID = t.uuidDsa)
933 # In our IDL representation of repsFrom the (uuidDsa)
934 # attribute is called (source_dsa_obj_guid)
935 # cn_conn - (cn) is nTDSConnection object and child of the local
936 # DC's nTDSDSA object and (cn!fromServer = s)
937 # s_rep - source DSA replica of n
939 # If we have the replica and its not needed
940 # then we add it to the "to be deleted" list.
941 for dnstr in current_rep_table:
942 if dnstr not in needed_rep_table:
943 delete_reps.add(dnstr)
945 DEBUG_FN('current %d needed %d delete %d' % (len(current_rep_table),
946 len(needed_rep_table), len(delete_reps)))
948 if delete_reps:
949 DEBUG('deleting these reps: %s' % delete_reps)
950 for dnstr in delete_reps:
951 del current_rep_table[dnstr]
953 # Now perform the scan of replicas we'll need
954 # and compare any current repsFrom against the
955 # connections
956 for n_rep in needed_rep_table.values():
958 # load any repsFrom and fsmo roles as we'll
959 # need them during connection translation
960 n_rep.load_repsFrom(self.samdb)
961 n_rep.load_fsmo_roles(self.samdb)
963 # Loop thru the existing repsFrom tuples (if any)
964 # XXX This is a list and could contain duplicates
965 # (multiple load_repsFrom calls)
966 for t_repsFrom in n_rep.rep_repsFrom:
968 # for each tuple t in n!repsFrom, let s be the nTDSDSA
969 # object such that s!objectGUID = t.uuidDsa
970 guidstr = str(t_repsFrom.source_dsa_obj_guid)
971 s_dsa = self.get_dsa_by_guidstr(guidstr)
973 # Source dsa is gone from config (strange)
974 # so cleanup stale repsFrom for unlisted DSA
975 if s_dsa is None:
976 logger.warning("repsFrom source DSA guid (%s) not found" %
977 guidstr)
978 t_repsFrom.to_be_deleted = True
979 continue
981 s_dnstr = s_dsa.dsa_dnstr
983 # Retrieve my DSAs connection object (if it exists)
984 # that specifies the fromServer equivalent to
985 # the DSA that is specified in the repsFrom source
986 connections = current_dsa.get_connection_by_from_dnstr(s_dnstr)
988 count = 0
989 cn_conn = None
991 for con in connections:
992 if con.is_rodc_topology():
993 continue
994 cn_conn = con
996 # Let (cn) be the nTDSConnection object such that (cn)
997 # is a child of the local DC's nTDSDSA object and
998 # (cn!fromServer = s) and (cn!options) does not contain
999 # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.
1001 # KCC removes this repsFrom tuple if any of the following
1002 # is true:
1003 # cn = NULL.
1004 # [...]
1006 #XXX varying possible interpretations of rodc_topology
1007 if cn_conn is None:
1008 t_repsFrom.to_be_deleted = True
1009 continue
1011 # [...] KCC removes this repsFrom tuple if:
1013 # No NC replica of the NC "is present" on DSA that
1014 # would be source of replica
1016 # A writable replica of the NC "should be present" on
1017 # the local DC, but a partial replica "is present" on
1018 # the source DSA
1019 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
1021 if s_rep is None or not s_rep.is_present() or \
1022 (not n_rep.is_ro() and s_rep.is_partial()):
1024 t_repsFrom.to_be_deleted = True
1025 continue
1027 # If the KCC did not remove t from n!repsFrom, it updates t
1028 self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
1030 # Loop thru connections and add implied repsFrom tuples
1031 # for each NTDSConnection under our local DSA if the
1032 # repsFrom is not already present
1033 for cn_conn in current_dsa.connect_table.values():
1035 implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn)
1036 if not implied:
1037 continue
1039 # Loop thru the existing repsFrom tuples (if any) and
1040 # if we already have a tuple for this connection then
1041 # no need to proceed to add. It will have been changed
1042 # to have the correct attributes above
1043 for t_repsFrom in n_rep.rep_repsFrom:
1044 guidstr = str(t_repsFrom.source_dsa_obj_guid)
1045 #XXX what?
1046 if s_dsa is self.get_dsa_by_guidstr(guidstr):
1047 s_dsa = None
1048 break
1050 if s_dsa is None:
1051 continue
1053 # Create a new RepsFromTo and proceed to modify
1054 # it according to specification
1055 t_repsFrom = RepsFromTo(n_rep.nc_dnstr)
1057 t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid
1059 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
1061 self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
1063 # Add to our NC repsFrom as this is newly computed
1064 if t_repsFrom.is_modified():
1065 n_rep.rep_repsFrom.append(t_repsFrom)
1067 if opts.readonly:
1068 # Display any to be deleted or modified repsFrom
1069 text = n_rep.dumpstr_to_be_deleted()
1070 if text:
1071 logger.info("TO BE DELETED:\n%s" % text)
1072 text = n_rep.dumpstr_to_be_modified()
1073 if text:
1074 logger.info("TO BE MODIFIED:\n%s" % text)
1076 # Perform deletion from our tables but perform
1077 # no database modification
1078 n_rep.commit_repsFrom(self.samdb, ro=True)
1079 else:
1080 # Commit any modified repsFrom to the NC replica
1081 n_rep.commit_repsFrom(self.samdb)
1083 def merge_failed_links(self):
1084 """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.
1086 The KCC on a writable DC attempts to merge the link and connection
1087 failure information from bridgehead DCs in its own site to help it
1088 identify failed bridgehead DCs.
1090 Based on MS-ADTS 6.2.2.3.2 "Merge of kCCFailedLinks and kCCFailedLinks
1091 from Bridgeheads"
1093 :param ping: An oracle of current bridgehead availability
1094 :return: None
1095 """
1096 # 1. Queries every bridgehead server in your site (other than yourself)
1097 # 2. For every ntDSConnection that references a server in a different
1098 # site merge all the failure info
1100 # XXX - not implemented yet
1101 if opts.attempt_live_connections:
1102 DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")
1103 else:
1104 DEBUG_FN("skipping merge_failed_links() because it requires "
1105 "real network connections\n"
1106 "and we weren't asked to --attempt-live-connections")
1108 def setup_graph(self, part):
1109 """Set up a GRAPH, populated with a VERTEX for each site
1110 object, a MULTIEDGE for each siteLink object, and a
1111 MULTIEDGESET for each siteLinkBridge object (or implied
1112 siteLinkBridge).
1114 ::returns: a new graph
1115 """
1116 guid_to_vertex = {}
1117 # Create graph
1118 g = IntersiteGraph()
1119 # Add vertices
1120 for site_guid, site in self.site_table.items():
1121 vertex = Vertex(site, part)
1122 vertex.guid = site_guid
1123 vertex.ndrpacked_guid = ndr_pack(site.site_guid)
1124 g.vertices.add(vertex)
1126 if not guid_to_vertex.get(site_guid):
1127 guid_to_vertex[site_guid] = []
1129 guid_to_vertex[site_guid].append(vertex)
1131 connected_vertices = set()
1132 for transport_guid, transport in self.transport_table.items():
1133 # Currently only ever "IP"
1134 if transport.name != 'IP':
1135 DEBUG_FN("setup_graph is ignoring transport %s" %
1136 transport.name)
1137 continue
1138 for site_link_dn, site_link in self.sitelink_table.items():
1139 new_edge = create_edge(transport_guid, site_link,
1140 guid_to_vertex)
1141 connected_vertices.update(new_edge.vertices)
1142 g.edges.add(new_edge)
1144 # If 'Bridge all site links' is enabled and Win2k3 bridges required
1145 # is not set
1146 # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
1147 # No documentation for this however, ntdsapi.h appears to have:
1148 # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
1149 if (((self.my_site.site_options & 0x00000002) == 0
1150 and (self.my_site.site_options & 0x00001000) == 0)):
1151 g.edge_set.add(create_auto_edge_set(g, transport_guid))
1152 else:
1153 # TODO get all site link bridges
1154 for site_link_bridge in []:
1155 g.edge_set.add(create_edge_set(g, transport_guid,
1156 site_link_bridge))
1158 g.connected_vertices = connected_vertices
1160 #be less verbose in dot file output unless --debug
1161 do_dot_files = opts.dot_files and opts.debug
1162 dot_edges = []
1163 for edge in g.edges:
1164 for a, b in itertools.combinations(edge.vertices, 2):
1165 dot_edges.append((a.site.site_dnstr, b.site.site_dnstr))
1166 verify_properties = ()
1167 verify_and_dot('site_edges', dot_edges, directed=False,
1168 label=self.my_dsa_dnstr,
1169 properties=verify_properties, debug=DEBUG,
1170 verify=opts.verify,
1171 dot_files=do_dot_files)
1173 return g
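# Illustrative sketch (not part of the original script): the two site-options
# bit tests in setup_graph() above. The automatic "bridge all site links"
# edge set is built only when neither bit is set. The constants mirror the
# magic numbers in the comments; the names are informal stand-ins.
_EXAMPLE_OPT_BRIDGES_REQUIRED = 0x00000002
_EXAMPLE_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000

def _bridge_all_site_links_example(site_options):
    return ((site_options & _EXAMPLE_OPT_BRIDGES_REQUIRED) == 0 and
            (site_options & _EXAMPLE_OPT_W2K3_BRIDGES_REQUIRED) == 0)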
1175 def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
1176 """Get a bridghead DC.
1178 :param site: site object representing for which a bridgehead
1179 DC is desired.
1180 :param part: crossRef for NC to replicate.
1181 :param transport: interSiteTransport object for replication
1182 traffic.
1183 :param partial_ok: True if a DC containing a partial
1184 replica or a full replica will suffice, False if only
1185 a full replica will suffice.
1186 :param detect_failed: True to detect failed DCs and route
1187 replication traffic around them, False to assume no DC
1188 has failed.
1189 ::returns: dsa object for the bridgehead DC or None
1190 """
1192 bhs = self.get_all_bridgeheads(site, part, transport,
1193 partial_ok, detect_failed)
1194 if len(bhs) == 0:
1195 DEBUG_MAGENTA("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" %
1196 site.site_dnstr)
1197 return None
1198 else:
1199 DEBUG_GREEN("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" %
1200 (site.site_dnstr, bhs[0].dsa_dnstr))
1201 return bhs[0]
1203 def get_all_bridgeheads(self, site, part, transport,
1204 partial_ok, detect_failed):
1205 """Get all bridghead DCs satisfying the given criteria
1207 :param site: site object representing the site for which
1208 bridgehead DCs are desired.
1209 :param part: partition for NC to replicate.
1210 :param transport: interSiteTransport object for
1211 replication traffic.
1212 :param partial_ok: True if a DC containing a partial
1213 replica or a full replica will suffice, False if
1214 only a full replica will suffice.
1215 :param detect_failed: True to detect failed DCs and route
1216 replication traffic around them, FALSE to assume
1217 no DC has failed.
1218 ::returns: list of dsa object for available bridgehead
1219 DCs or None
1220 """
1222 bhs = []
1224 logger.debug("get_all_bridgeheads: %s" % transport.name)
1225 if 'Site-5' in site.site_dnstr:
1226 DEBUG_RED("get_all_bridgeheads with %s, part%s, partial_ok %s"
1227 " detect_failed %s" % (site.site_dnstr, part.partstr,
1228 partial_ok, detect_failed))
1229 logger.debug(site.rw_dsa_table)
1230 for dsa in site.rw_dsa_table.values():
1232 pdnstr = dsa.get_parent_dnstr()
1234 # IF t!bridgeheadServerListBL has one or more values and
1235 # t!bridgeheadServerListBL does not contain a reference
1236 # to the parent object of dc then skip dc
1237 if ((len(transport.bridgehead_list) != 0 and
1238 pdnstr not in transport.bridgehead_list)):
1239 continue
1241 # IF dc is in the same site as the local DC
1242 # IF a replica of cr!nCName is not in the set of NC replicas
1243 # that "should be present" on dc or a partial replica of the
1244 # NC "should be present" but partialReplicasOkay = FALSE
1245 # Skip dc
1246 if self.my_site.same_site(dsa):
1247 needed, ro, partial = part.should_be_present(dsa)
1248 if not needed or (partial and not partial_ok):
1249 continue
1250 rep = dsa.get_current_replica(part.nc_dnstr)
1252 # ELSE
1253 # IF an NC replica of cr!nCName is not in the set of NC
1254 # replicas that "are present" on dc or a partial replica of
1255 # the NC "is present" but partialReplicasOkay = FALSE
1256 # Skip dc
1257 else:
1258 rep = dsa.get_current_replica(part.nc_dnstr)
1259 if rep is None or (rep.is_partial() and not partial_ok):
1260 continue
1262 # IF AmIRODC() and cr!nCName corresponds to default NC then
1263 # Let dsaobj be the nTDSDSA object of the dc
1264 # IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
1265 # Skip dc
1266 if self.my_dsa.is_ro() and rep is not None and rep.is_default():
1267 if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
1268 continue
1270 # IF t!name != "IP" and the parent object of dc has no value for
1271 # the attribute specified by t!transportAddressAttribute
1272 # Skip dc
1273 if transport.name != "IP":
1274 # MS tech specification says we retrieve the named
1275 # attribute in "transportAddressAttribute" from the parent
1276 # of the DSA object
1277 try:
1278 attrs = [transport.address_attr]
1280 res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
1281 attrs=attrs)
1282 except ldb.LdbError, (enum, estr):
1283 continue
1285 msg = res[0]
1286 if transport.address_attr not in msg:
1287 continue
1288 #XXX nastr is NEVER USED. It will be removed.
1289 nastr = str(msg[transport.address_attr][0])
1291 # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
1292 # Skip dc
1293 if self.is_bridgehead_failed(dsa, detect_failed):
1294 DEBUG("bridgehead is failed")
1295 continue
1297 logger.debug("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr)
1298 bhs.append(dsa)
1300 # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
1301 # s!options
1302 # SORT bhs such that all GC servers precede DCs that are not GC
1303 # servers, and otherwise by ascending objectGUID
1304 # ELSE
1305 # SORT bhs in a random order
1306 if site.is_random_bridgehead_disabled():
1307 bhs.sort(sort_dsa_by_gc_and_guid)
1308 else:
1309 random.shuffle(bhs)
1310 DEBUG_YELLOW(bhs)
1311 return bhs
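# Illustrative sketch (not part of the original script): the deterministic
# bridgehead ordering described above (GC servers first, then ascending
# objectGUID), expressed as a key-based sort over plain (is_gc, guid_bytes)
# pairs rather than the real sort_dsa_by_gc_and_guid comparison function.
def _sort_bridgeheads_example(bridgeheads):
    """bridgeheads: list of (is_gc, guid_bytes) tuples."""
    return sorted(bridgeheads, key=lambda bh: (not bh[0], bh[1]))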
1313 def is_bridgehead_failed(self, dsa, detect_failed):
1314 """Determine whether a given DC is known to be in a failed state
1315 ::returns: True if and only if the DC should be considered failed
1317 Here we DEPART from the pseudo code spec which appears to be
1318 wrong. It says, in full:
1320 /***** BridgeheadDCFailed *****/
1321 /* Determine whether a given DC is known to be in a failed state.
1322 * IN: objectGUID - objectGUID of the DC's nTDSDSA object.
1323 * IN: detectFailedDCs - TRUE if and only failed DC detection is
1324 * enabled.
1325 * RETURNS: TRUE if and only if the DC should be considered to be in a
1326 * failed state.
1328 BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool
1330 IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in
1331 the options attribute of the site settings object for the local
1332 DC's site
1333 RETURN FALSE
1334 ELSEIF a tuple z exists in the kCCFailedLinks or
1335 kCCFailedConnections variables such that z.UUIDDsa =
1336 objectGUID, z.FailureCount > 1, and the current time -
1337 z.TimeFirstFailure > 2 hours
1338 RETURN TRUE
1339 ELSE
1340 RETURN detectFailedDCs
1341 ENDIF
1344 where you will see detectFailedDCs is not behaving as
1345 advertised -- it is acting as a default return code in the
1346 event that a failure is not detected, not a switch turning
1347 detection on or off. Elsewhere the documentation seems to
1348 concur with the comment rather than the code.
1349 """
1350 if not detect_failed:
1351 return False
1353 # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
1354 # When DETECT_STALE_DISABLED, we can never know
1355 # if it's in a failed state
1356 if self.my_site.site_options & 0x00000008:
1357 return False
1359 return self.is_stale_link_connection(dsa)
1361 def create_connection(self, part, rbh, rsite, transport,
1362 lbh, lsite, link_opt, link_sched,
1363 partial_ok, detect_failed):
1364 """Create an nTDSConnection object with the given parameters
1365 if one does not already exist.
1367 :param part: crossRef object for the NC to replicate.
1368 :param rbh: nTDSDSA object for DC to act as the
1369 IDL_DRSGetNCChanges server (which is in a site other
1370 than the local DC's site).
1371 :param rsite: site of the rbh
1372 :param transport: interSiteTransport object for the transport
1373 to use for replication traffic.
1374 :param lbh: nTDSDSA object for DC to act as the
1375 IDL_DRSGetNCChanges client (which is in the local DC's site).
1376 :param lsite: site of the lbh
1377 :param link_opt: Replication parameters (aggregated siteLink options,
1378 etc.)
1379 :param link_sched: Schedule specifying the times at which
1380 to begin replicating.
1381 :param partial_ok: True if bridgehead DCs containing partial
1382 replicas of the NC are acceptable.
1383 :param detect_failed: True to detect failed DCs and route
1384 replication traffic around them, FALSE to assume no DC
1385 has failed.
1386 """
1387 rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
1388 partial_ok, False)
1389 rbh_table = {x.dsa_dnstr: x for x in rbhs_all}
1391 DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all),
1392 [x.dsa_dnstr for x in rbhs_all]))
1394 # MS-TECH says to compute rbhs_avail but then doesn't use it
1395 # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
1396 # partial_ok, detect_failed)
1398 lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
1399 partial_ok, False)
1400 if lbh.is_ro():
1401 lbhs_all.append(lbh)
1403 DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all),
1404 [x.dsa_dnstr for x in lbhs_all]))
1406 # MS-TECH says to compute lbhs_avail but then doesn't use it
1407 # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
1408 # partial_ok, detect_failed)
1410 # FOR each nTDSConnection object cn such that the parent of cn is
1411 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1412 for ldsa in lbhs_all:
1413 for cn in ldsa.connect_table.values():
1415 rdsa = rbh_table.get(cn.from_dnstr)
1416 if rdsa is None:
1417 continue
1419 DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr)
1420 # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
1421 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
1422 # cn!transportType references t
1423 if ((cn.is_generated() and
1424 not cn.is_rodc_topology() and
1425 cn.transport_guid == transport.guid)):
1427 # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
1428 # cn!options and cn!schedule != sch
1429 # Perform an originating update to set cn!schedule to
1430 # sched
1431 if ((not cn.is_user_owned_schedule() and
1432 not cn.is_equivalent_schedule(link_sched))):
1433 cn.schedule = link_sched
1434 cn.set_modified(True)
1436 # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1437 # NTDSCONN_OPT_USE_NOTIFY are set in cn
1438 if cn.is_override_notify_default() and \
1439 cn.is_use_notify():
1441 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
1442 # ri.Options
1443 # Perform an originating update to clear bits
1444 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1445 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1446 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
1447 cn.options &= \
1448 ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1449 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1450 cn.set_modified(True)
1452 # ELSE
1453 else:
1455 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
1456 # ri.Options
1457 # Perform an originating update to set bits
1458 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1459 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1460 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1461 cn.options |= \
1462 (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1463 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1464 cn.set_modified(True)
1466 # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
1467 if cn.is_twoway_sync():
1469 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
1470 # ri.Options
1471 # Perform an originating update to clear bit
1472 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1473 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
1474 cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1475 cn.set_modified(True)
1477 # ELSE
1478 else:
1480 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
1481 # ri.Options
1482 # Perform an originating update to set bit
1483 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1484 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1485 cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1486 cn.set_modified(True)
1488 # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
1489 # in cn!options
1490 if cn.is_intersite_compression_disabled():
1492 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
1493 # in ri.Options
1494 # Perform an originating update to clear bit
1495 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1496 # cn!options
1497 if ((link_opt &
1498 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0):
1499 cn.options &= \
1500 ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1501 cn.set_modified(True)
1503 # ELSE
1504 else:
1505 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1506 # ri.Options
1507 # Perform an originating update to set bit
1508 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1509 # cn!options
1510 if ((link_opt &
1511 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
1512 cn.options |= \
1513 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1514 cn.set_modified(True)
1516 # Display any modified connection
1517 if opts.readonly:
1518 if cn.to_be_modified:
1519 logger.info("TO BE MODIFIED:\n%s" % cn)
1521 ldsa.commit_connections(self.samdb, ro=True)
1522 else:
1523 ldsa.commit_connections(self.samdb)
1524 # ENDFOR
1526 valid_connections = 0
1528 # FOR each nTDSConnection object cn such that cn!parent is
1529 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1530 for ldsa in lbhs_all:
1531 for cn in ldsa.connect_table.values():
1533 rdsa = rbh_table.get(cn.from_dnstr)
1534 if rdsa is None:
1535 continue
1537 DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr)
1539 # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
1540 # cn!transportType references t) and
1541 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
1542 if (((not cn.is_generated() or
1543 cn.transport_guid == transport.guid) and
1544 not cn.is_rodc_topology())):
1546 # LET rguid be the objectGUID of the nTDSDSA object
1547 # referenced by cn!fromServer
1548 # LET lguid be (cn!parent)!objectGUID
1550 # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
1551 # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
1552 # Increment cValidConnections by 1
1553 if ((not self.is_bridgehead_failed(rdsa, detect_failed) and
1554 not self.is_bridgehead_failed(ldsa, detect_failed))):
1555 valid_connections += 1
1557 # IF keepConnections does not contain cn!objectGUID
1558 # APPEND cn!objectGUID to keepConnections
1559 self.kept_connections.add(cn)
1561 # ENDFOR
1562 DEBUG_RED("valid connections %d" % valid_connections)
1563 DEBUG("kept_connections:\n%s" % (self.kept_connections,))
1564 # IF cValidConnections = 0
1565 if valid_connections == 0:
1567 # LET opt be NTDSCONN_OPT_IS_GENERATED
1568 opt = dsdb.NTDSCONN_OPT_IS_GENERATED
1570 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
1571 # SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1572 # NTDSCONN_OPT_USE_NOTIFY in opt
1573 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1574 opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1575 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1577 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
1578 # SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
1579 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1580 opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1582 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1583 # ri.Options
1584 # SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
1585 if ((link_opt &
1586 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
1587 opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1589 # Perform an originating update to create a new nTDSConnection
1590 # object cn that is a child of lbh, cn!enabledConnection = TRUE,
1591 # cn!options = opt, cn!transportType is a reference to t,
1592 # cn!fromServer is a reference to rbh, and cn!schedule = sch
1593 DEBUG_FN("new connection, KCC dsa: %s" % self.my_dsa.dsa_dnstr)
1594 cn = lbh.new_connection(opt, 0, transport,
1595 rbh.dsa_dnstr, link_sched)
1597 # Display any added connection
1598 if opts.readonly:
1599 if cn.to_be_added:
1600 logger.info("TO BE ADDED:\n%s" % cn)
1602 lbh.commit_connections(self.samdb, ro=True)
1603 else:
1604 lbh.commit_connections(self.samdb)
1606 # APPEND cn!objectGUID to keepConnections
1607 self.kept_connections.add(cn)
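# Illustrative sketch (not part of the original script): how the options for
# a brand new nTDSConnection are derived from the aggregated siteLink options
# just above, using placeholder constants instead of the real dsdb/NTDSCONN
# and NTDSSITELINK values.
_EX_SITELINK_USE_NOTIFY = 0x1
_EX_SITELINK_TWOWAY_SYNC = 0x2
_EX_SITELINK_DISABLE_COMPRESSION = 0x4
_EX_CONN_IS_GENERATED = 0x10
_EX_CONN_OVERRIDE_NOTIFY_DEFAULT = 0x20
_EX_CONN_USE_NOTIFY = 0x40
_EX_CONN_TWOWAY_SYNC = 0x80
_EX_CONN_DISABLE_INTERSITE_COMPRESSION = 0x100

def _new_connection_options_example(link_opt):
    opt = _EX_CONN_IS_GENERATED
    if link_opt & _EX_SITELINK_USE_NOTIFY:
        opt |= _EX_CONN_OVERRIDE_NOTIFY_DEFAULT | _EX_CONN_USE_NOTIFY
    if link_opt & _EX_SITELINK_TWOWAY_SYNC:
        opt |= _EX_CONN_TWOWAY_SYNC
    if link_opt & _EX_SITELINK_DISABLE_COMPRESSION:
        opt |= _EX_CONN_DISABLE_INTERSITE_COMPRESSION
    return opt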
1609 def add_transports(self, vertex, local_vertex, graph, detect_failed):
1611 # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
1612 # here, but using vertex seems to make more sense. That is,
1613 # the docs want this:
1615 #bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1616 # local_vertex.is_black(), detect_failed)
1618 # TODO WHY?????
1620 vertex.accept_red_red = []
1621 vertex.accept_black = []
1622 found_failed = False
1623 for t_guid, transport in self.transport_table.items():
1624 if transport.name != 'IP':
1625 #XXX well this is cheating a bit
1626 logging.warning("WARNING: we are ignoring a transport named %r"
1627 % transport.name)
1628 continue
1630 # FLAG_CR_NTDS_DOMAIN 0x00000002
1631 if ((vertex.is_red() and transport.name != "IP" and
1632 vertex.part.system_flags & 0x00000002)):
1633 continue
1635 if vertex not in graph.connected_vertices:
1636 continue
1638 partial_replica_okay = vertex.is_black()
1639 bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1640 partial_replica_okay, detect_failed)
1641 if bh is None:
1642 found_failed = True
1643 continue
1645 vertex.accept_red_red.append(t_guid)
1646 vertex.accept_black.append(t_guid)
1648 # Add additional transport to allow another run of Dijkstra
1649 vertex.accept_red_red.append("EDGE_TYPE_ALL")
1650 vertex.accept_black.append("EDGE_TYPE_ALL")
1652 return found_failed
1654 def create_connections(self, graph, part, detect_failed):
1655 """Construct an NC replica graph for the NC identified by
1656 the given crossRef, then create any additional nTDSConnection
1657 objects required.
1659 :param graph: site graph.
1660 :param part: crossRef object for NC.
1661 :param detect_failed: True to detect failed DCs and route
1662 replication traffic around them, False to assume no DC
1663 has failed.
1665 Modifies self.kept_connections by adding any connections
1666 deemed to be "in use".
1668 ::returns: (all_connected, found_failed_dc)
1669 (all_connected) True if the resulting NC replica graph
1670 connects all sites that need to be connected.
1671 (found_failed_dc) True if one or more failed DCs were
1672 detected.
1673 """
1674 all_connected = True
1675 found_failed = False
1677 logger.debug("create_connections(): enter\n"
1678 "\tpartdn=%s\n\tdetect_failed=%s" %
1679 (part.nc_dnstr, detect_failed))
1681 # XXX - This is a highly abbreviated function from the MS-TECH
1682 # ref. It creates connections between bridgeheads to all
1683 # sites that have appropriate replicas. Thus we are not
1684 # creating a minimum cost spanning tree but instead
1685 # producing a fully connected tree. This should produce
1686 # a full (albeit not optimal cost) replication topology.
1688 my_vertex = Vertex(self.my_site, part)
1689 my_vertex.color_vertex()
1691 for v in graph.vertices:
1692 v.color_vertex()
1693 if self.add_transports(v, my_vertex, graph, False):
1694 found_failed = True
1696 # No NC replicas for this NC in the site of the local DC,
1697 # so no nTDSConnection objects need be created
1698 if my_vertex.is_white():
1699 return all_connected, found_failed
1701 edge_list, n_components = get_spanning_tree_edges(graph,
1702 self.my_site,
1703 label=part.partstr)
1705 logger.debug("%s Number of components: %d" %
1706 (part.nc_dnstr, n_components))
1707 if n_components > 1:
1708 all_connected = False
1710 # LET partialReplicaOkay be TRUE if and only if
1711 # localSiteVertex.Color = COLOR.BLACK
1712 partial_ok = my_vertex.is_black()
1714 # Utilize the IP transport only for now
1715 transport = self.ip_transport
1717 DEBUG("edge_list %s" % edge_list)
1718 for e in edge_list:
1719 # XXX more accurate comparison?
1720 if e.directed and e.vertices[0].site is self.my_site:
1721 continue
1723 if e.vertices[0].site is self.my_site:
1724 rsite = e.vertices[1].site
1725 else:
1726 rsite = e.vertices[0].site
1728 # We don't make connections to our own site as that
1729 # is the intrasite topology generator's job
1730 if rsite is self.my_site:
1731 DEBUG("rsite is my_site")
1732 continue
1734 # Determine bridgehead server in remote site
1735 rbh = self.get_bridgehead(rsite, part, transport,
1736 partial_ok, detect_failed)
1737 if rbh is None:
1738 continue
1740 # An RODC acts as a BH for itself
1741 # IF AmIRODC() then
1742 # LET lbh be the nTDSDSA object of the local DC
1743 # ELSE
1744 # LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
1745 # cr, t, partialReplicaOkay, detectFailedDCs)
1746 if self.my_dsa.is_ro():
1747 lsite = self.my_site
1748 lbh = self.my_dsa
1749 else:
1750 lsite = self.my_site
1751 lbh = self.get_bridgehead(lsite, part, transport,
1752 partial_ok, detect_failed)
1753 # TODO
1754 if lbh is None:
1755 DEBUG_RED("DISASTER! lbh is None")
1756 return False, True
1758 DEBUG_CYAN("SITES")
1759 print lsite, rsite
1760 DEBUG_BLUE("vertices")
1761 print e.vertices
1762 DEBUG_BLUE("bridgeheads")
1763 print lbh, rbh
1764 DEBUG_BLUE("-" * 70)
1766 sitelink = e.site_link
1767 if sitelink is None:
1768 link_opt = 0x0
1769 link_sched = None
1770 else:
1771 link_opt = sitelink.options
1772 link_sched = sitelink.schedule
1774 self.create_connection(part, rbh, rsite, transport,
1775 lbh, lsite, link_opt, link_sched,
1776 partial_ok, detect_failed)
1778 return all_connected, found_failed
1780 def create_intersite_connections(self):
1781 """Computes an NC replica graph for each NC replica that "should be
1782 present" on the local DC or "is present" on any DC in the same site
1783 as the local DC. For each edge directed to an NC replica on such a
1784 DC from an NC replica on a DC in another site, the KCC creates an
1785 nTDSConnection object to imply that edge if one does not already
1786 exist.
1788 Modifies self.kept_connections - A set of nTDSConnection
1789 objects for edges that are directed
1790 to the local DC's site in one or more NC replica graphs.
1792 :returns: True if spanning trees were created for all NC replica
1793 graphs, otherwise False.
1794 """
1795 all_connected = True
1796 self.kept_connections = set()
1798 # LET crossRefList be the set containing each object o of class
1799 # crossRef such that o is a child of the CN=Partitions child of the
1800 # config NC
1802 # FOR each crossRef object cr in crossRefList
1803 # IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
1804 # is clear in cr!systemFlags, skip cr.
1805 # LET g be the GRAPH return of SetupGraph()
1807 for part in self.part_table.values():
1809 if not part.is_enabled():
1810 continue
1812 if part.is_foreign():
1813 continue
1815 graph = self.setup_graph(part)
1817 # Create nTDSConnection objects, routing replication traffic
1818 # around "failed" DCs.
1819 found_failed = False
1821 connected, found_failed = self.create_connections(graph,
1822 part, True)
1824 DEBUG("with detect_failed: connected %s Found failed %s" %
1825 (connected, found_failed))
1826 if not connected:
1827 all_connected = False
1829 if found_failed:
1830 # One or more failed DCs preclude use of the ideal NC
1831 # replica graph. Add connections for the ideal graph.
1832 self.create_connections(graph, part, False)
1834 return all_connected
1836 def intersite(self):
1837 """The head method for generating the inter-site KCC replica
1838 connection graph and attendant nTDSConnection objects
1839 in the samdb.
1841 Produces the self.kept_connections set of NTDS Connections
1842 that should be kept during the subsequent pruning process.
1844 :return: True if the produced NC replica graph connects all
1845 sites that need to be connected, otherwise False.
1846 """
1848 # Retrieve my DSA
1849 mydsa = self.my_dsa
1850 mysite = self.my_site
1851 all_connected = True
1853 logger.debug("intersite(): enter")
1855 # Determine who is the ISTG
1856 if opts.readonly:
1857 mysite.select_istg(self.samdb, mydsa, ro=True)
1858 else:
1859 mysite.select_istg(self.samdb, mydsa, ro=False)
1861 # Test whether local site has topology disabled
1862 if mysite.is_intersite_topology_disabled():
1863 logger.debug("intersite(): exit disabled all_connected=%d" %
1864 all_connected)
1865 return all_connected
1867 if not mydsa.is_istg():
1868 logger.debug("intersite(): exit not istg all_connected=%d" %
1869 all_connected)
1870 return all_connected
1872 self.merge_failed_links()
1874 # For each NC with an NC replica that "should be present" on the
1875 # local DC or "is present" on any DC in the same site as the
1876 # local DC, the KCC constructs a site graph--a precursor to an NC
1877 # replica graph. The site connectivity for a site graph is defined
1878 # by objects of class interSiteTransport, siteLink, and
1879 # siteLinkBridge in the config NC.
1881 all_connected = self.create_intersite_connections()
1883 logger.debug("intersite(): exit all_connected=%d" % all_connected)
1884 return all_connected
1886 def update_rodc_connection(self):
1887 """Runs when the local DC is an RODC and updates the RODC NTFRS
1888 connection object.
1889 """
1890 # Given an nTDSConnection object cn1, such that cn1.options contains
1891 # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2,
1892 # whose options do not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure
1893 # that the following is true:
1895 # cn1.fromServer = cn2.fromServer
1896 # cn1.schedule = cn2.schedule
1898 # If no such cn2 can be found, cn1 is not modified.
1899 # If no such cn1 can be found, nothing is modified by this task.
1901 if not self.my_dsa.is_ro():
1902 return
1904 all_connections = self.my_dsa.connect_table.values()
1905 ro_connections = [x for x in all_connections if x.is_rodc_topology()]
1906 rw_connections = [x for x in all_connections
1907 if x not in ro_connections]
1909 # XXX here we are dealing with multiple RODC_TOPO connections,
1910 # if they exist. It is not clear whether the spec means that
1911 # or if it ever arises.
1912 if rw_connections and ro_connections:
1913 for con in ro_connections:
1914 cn2 = rw_connections[0]
1915 con.from_dnstr = cn2.from_dnstr
1916 con.schedule = cn2.schedule
1917 con.to_be_modified = True
1919 self.my_dsa.commit_connections(self.samdb, ro=opts.readonly)
1921 def intrasite_max_node_edges(self, node_count):
1922 """Returns the maximum number of edges directed to a node in
1923 the intrasite replica graph.
1925 The KCC does not create more
1926 than 50 edges directed to a single DC. To optimize replication,
1927 we compute that each node should have n+2 total edges directed
1928 to it such that (n) is the smallest non-negative integer
1929 satisfying (node_count <= 2*(n*n) + 6*n + 7)
1931 (If the number of edges is m (i.e. n + 2), that is the same as
1932 2 * m*m - 2 * m + 3).
1934 edges n nodecount
1935 2 0 7
1936 3 1 15
1937 4 2 27
1938 5 3 43
1940 50 48 4903
1942 :param node_count: total number of nodes in the replica graph
1944 The intention is that there should be no more than 3 hops
1945 between any two DSAs at a site. With up to 7 nodes the 2 edges
1946 of the ring are enough; any configuration of extra edges with
1947 8 nodes will be enough. It is less clear that the 3 hop
1948 guarantee holds at e.g. 15 nodes in degenerate cases, but
1949 those are quite unlikely given the extra edges are randomly
1950 arranged.
1951 """
1952 n = 0
1953 while True:
1954 if node_count <= (2 * (n * n) + (6 * n) + 7):
1955 break
1956 n = n + 1
1957 n = n + 2
1958 if n < 50:
1959 return n
1960 return 50
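# Worked examples of the formula above (comments only; values checked
# by hand against node_count <= 2*n*n + 6*n + 7):
#     intrasite_max_node_edges(7)    -> 2   (n = 0,  7 <= 7)
#     intrasite_max_node_edges(8)    -> 3   (n = 1,  8 <= 15)
#     intrasite_max_node_edges(43)   -> 5   (n = 3, 43 <= 43)
#     intrasite_max_node_edges(44)   -> 6   (n = 4, 44 <= 63)
#     intrasite_max_node_edges(4904) -> 50  (n = 49 would give 51, capped)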
1962 def construct_intrasite_graph(self, site_local, dc_local,
1963 nc_x, gc_only, detect_stale):
1964 # [MS-ADTS] 6.2.2.2
1965 # We're using the MS notation names here to allow
1966 # correlation back to the published algorithm.
1968 # nc_x - naming context (x) that we are testing if it
1969 # "should be present" on the local DC
1970 # f_of_x - replica (f) found on a DC (s) for NC (x)
1971 # dc_s - DC where f_of_x replica was found
1972 # dc_local - local DC that potentially needs a replica
1973 # (f_of_x)
1974 # r_list - replica list R
1975 # p_of_x - replica (p) is partial and found on a DC (s)
1976 # for NC (x)
1977 # l_of_x - replica (l) is the local replica for NC (x)
1978 # that should appear on the local DC
1979 # r_len - length of replica list |R|
1981 # If the DSA doesn't need a replica for this
1982 # partition (NC x) then continue
1983 needed, ro, partial = nc_x.should_be_present(dc_local)
1985 DEBUG_YELLOW("construct_intrasite_graph(): enter" +
1986 "\n\tgc_only=%d" % gc_only +
1987 "\n\tdetect_stale=%d" % detect_stale +
1988 "\n\tneeded=%s" % needed +
1989 "\n\tro=%s" % ro +
1990 "\n\tpartial=%s" % partial +
1991 "\n%s" % nc_x)
1993 if not needed:
1994 DEBUG_RED("%s lacks 'should be present' status, "
1995 "aborting construct_intersite_graph!" %
1996 nc_x.nc_dnstr)
1997 return
1999 # Create a NCReplica that matches what the local replica
2000 # should say. We'll use this below in our r_list
2001 l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid,
2002 nc_x.nc_dnstr)
2004 l_of_x.identify_by_basedn(self.samdb)
2006 l_of_x.rep_partial = partial
2007 l_of_x.rep_ro = ro
2009 # Add this replica that "should be present" to the
2010 # needed replica table for this DSA
2011 dc_local.add_needed_replica(l_of_x)
2013 # Replica list
2015 # Let R be a sequence containing each writable replica f of x
2016 # such that f "is present" on a DC s satisfying the following
2017 # criteria:
2019 # * s is a writable DC other than the local DC.
2021 # * s is in the same site as the local DC.
2023 # * If x is a read-only full replica and x is a domain NC,
2024 # then the DC's functional level is at least
2025 # DS_BEHAVIOR_WIN2008.
2027 # * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
2028 # in the options attribute of the site settings object for
2029 # the local DC's site, or no tuple z exists in the
2030 # kCCFailedLinks or kCCFailedConnections variables such
2031 # that z.UUIDDsa is the objectGUID of the nTDSDSA object
2032 # for s, z.FailureCount > 0, and the current time -
2033 # z.TimeFirstFailure > 2 hours.
2035 r_list = []
2037 # We'll loop thru all the DSAs looking for
2038 # writeable NC replicas that match the naming
2039 # context dn for (nc_x)
2041 for dc_s in self.my_site.dsa_table.values():
2042 # If this partition (nc_x) doesn't appear as a
2043 # replica (f_of_x) on (dc_s) then continue
2044 if nc_x.nc_dnstr not in dc_s.current_rep_table:
2045 continue
2047 # Pull out the NCReplica (f) of (x) with the dn
2048 # that matches NC (x) we are examining.
2049 f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2051 # Replica (f) of NC (x) must be writable
2052 if f_of_x.is_ro():
2053 continue
2055 # Replica (f) of NC (x) must satisfy the
2056 # "is present" criteria for DC (s) that
2057 # it was found on
2058 if not f_of_x.is_present():
2059 continue
2061 # DC (s) must be a writable DSA other than
2062 # my local DC. In other words we'd only replicate
2063 # from another writable DC
2064 if dc_s.is_ro() or dc_s is dc_local:
2065 continue
2067 # Certain replica graphs are produced only
2068 # for global catalogs, so test against
2069 # method input parameter
2070 if gc_only and not dc_s.is_gc():
2071 continue
2073 # DC (s) must be in the same site as the local DC
2074 # as this is the intra-site algorithm. This is
2075 # handled by virtue of placing DSAs in per
2076 # site objects (see enclosing for() loop)
2078 # If NC (x) is intended to be read-only full replica
2079 # for a domain NC on the target DC then the source
2080 # DC should have functional level at minimum WIN2008
2082 # Effectively we're saying that in order to replicate
2083 # to a targeted RODC (which was introduced in Windows 2008)
2084 # then we have to replicate from a DC that is also minimally
2085 # at that level.
2087 # You can also see this requirement in the MS special
2088 # considerations for RODC which state that to deploy
2089 # an RODC, at least one writable domain controller in
2090 # the domain must be running Windows Server 2008
2091 if ro and not partial and nc_x.nc_type == NCType.domain:
2092 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
2093 continue
2095 # If we haven't been told to turn off stale connection
2096 # detection and this dsa has a stale connection then
2097 # continue
2098 if detect_stale and self.is_stale_link_connection(dc_s):
2099 continue
2101 # Replica meets criteria. Add it to the replica
2102 # list (R)
2103 r_list.append(f_of_x)
2105 # If a partial (not full) replica of NC (x) "should be present"
2106 # on the local DC, append to R each partial replica (p of x)
2107 # such that p "is present" on a DC satisfying the same
2108 # criteria defined above for full replica DCs.
2110 # XXX This loop and the previous one differ only in whether
2111 # the replica is partial or not. here we only accept partial
2112 # (because we're partial); before we only accepted full. Order
2113 # doesn't matter (the list is sorted a few lines down) so these
2114 # loops could easily be merged. Or this could be a helper
2115 # function.
2117 if partial:
2118 # Now we loop thru all the DSAs looking for
2119 # partial NC replicas that match the naming
2120 # context dn for (NC x)
2121 for dc_s in self.my_site.dsa_table.values():
2123 # If this partition NC (x) doesn't appear as a
2124 # replica (p) of NC (x) on the dsa DC (s) then
2125 # continue
2126 if nc_x.nc_dnstr not in dc_s.current_rep_table:
2127 continue
2129 # Pull out the NCReplica with the dn that
2130 # matches NC (x) we are examining.
2131 p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2133 # Replica (p) of NC (x) must be partial
2134 if not p_of_x.is_partial():
2135 continue
2137 # Replica (p) of NC (x) must satisfy the
2138 # "is present" criteria for DC (s) that
2139 # it was found on
2140 if not p_of_x.is_present():
2141 continue
2143 # DC (s) must be a writable DSA other than
2144 # my DSA. In other words we'd only replicate
2145 # from another writable DSA
2146 if dc_s.is_ro() or dc_s is dc_local:
2147 continue
2149 # Certain replica graphs are produced only
2150 # for global catalogs, so test against
2151 # method input parameter
2152 if gc_only and not dc_s.is_gc():
2153 continue
2155 # If we haven't been told to turn off stale connection
2156 # detection and this dsa has a stale connection then
2157 # continue
2158 if detect_stale and self.is_stale_link_connection(dc_s):
2159 continue
2161 # Replica meets criteria. Add it to the replica
2162 # list (R)
2163 r_list.append(p_of_x)
2165 # Append to R the NC replica that "should be present"
2166 # on the local DC
2167 r_list.append(l_of_x)
2169 r_list.sort(sort_replica_by_dsa_guid)
2170 r_len = len(r_list)
2172 max_node_edges = self.intrasite_max_node_edges(r_len)
2174 # Add a node for each r_list element to the replica graph
2175 graph_list = []
2176 for rep in r_list:
2177 node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
2178 graph_list.append(node)
2180 # For each r(i) from (0 <= i < |R|-1)
2181 i = 0
2182 while i < (r_len-1):
2183 # Add an edge from r(i) to r(i+1) if r(i) is a full
2184 # replica or r(i+1) is a partial replica
2185 if not r_list[i].is_partial() or r_list[i+1].is_partial():
2186 graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr)
2188 # Add an edge from r(i+1) to r(i) if r(i+1) is a full
2189 # replica or ri is a partial replica.
2190 if not r_list[i+1].is_partial() or r_list[i].is_partial():
2191 graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr)
2192 i = i + 1
2194 # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
2195 # or r0 is a partial replica.
2196 if not r_list[r_len-1].is_partial() or r_list[0].is_partial():
2197 graph_list[0].add_edge_from(r_list[r_len-1].rep_dsa_dnstr)
2199 # Add an edge from r0 to r|R|-1 if r0 is a full replica or
2200 # r|R|-1 is a partial replica.
2201 if not r_list[0].is_partial() or r_list[r_len-1].is_partial():
2202 graph_list[r_len-1].add_edge_from(r_list[0].rep_dsa_dnstr)
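# Illustrative sketch (comments only, hypothetical replicas): with
# |R| = 4 and every replica full, the loop above plus these two
# wrap-around edges give every node an incoming edge from both of its
# ring neighbours, i.e. a directed double ring r0-r1-r2-r3-r0.
# When r(i) is partial, the edge from r(i) to r(i+1) is only added if
# r(i+1) is also partial, so a partial replica never feeds a full one.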
2204 DEBUG("r_list is length %s" % len(r_list))
2205 DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr))
2206 for x in r_list))
2208 do_dot_files = opts.dot_files and opts.debug
2209 if opts.verify or do_dot_files:
2210 dot_edges = []
2211 dot_vertices = set()
2212 for v1 in graph_list:
2213 dot_vertices.add(v1.dsa_dnstr)
2214 for v2 in v1.edge_from:
2215 dot_edges.append((v2, v1.dsa_dnstr))
2216 dot_vertices.add(v2)
2218 verify_properties = ('connected', 'directed_double_ring_or_small')
2219 verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices,
2220 label='%s__%s__%s' % (site_local.site_dnstr,
2221 nctype_lut[nc_x.nc_type],
2222 nc_x.nc_dnstr),
2223 properties=verify_properties, debug=DEBUG,
2224 verify=opts.verify,
2225 dot_files=do_dot_files, directed=True)
2227 # For each existing nTDSConnection object implying an edge
2228 # from rj of R to ri such that j != i, an edge from rj to ri
2229 # is not already in the graph, and the total edges directed
2230 # to ri is less than n+2, the KCC adds that edge to the graph.
2231 for vertex in graph_list:
2232 dsa = self.my_site.dsa_table[vertex.dsa_dnstr]
2233 for connect in dsa.connect_table.values():
2234 remote = connect.from_dnstr
2235 if remote in self.my_site.dsa_table:
2236 vertex.add_edge_from(remote)
2238 DEBUG('reps are: %s' % ' '.join(x.rep_dsa_dnstr for x in r_list))
2239 DEBUG('dsas are: %s' % ' '.join(x.dsa_dnstr for x in graph_list))
2241 for tnode in graph_list:
2242 # To optimize replication latency in sites with many NC
2243 # replicas, the KCC adds new edges directed to ri to bring
2244 # the total edges to n+2, where the NC replica rk of R
2245 # from which the edge is directed is chosen at random such
2246 # that k != i and an edge from rk to ri is not already in
2247 # the graph.
2249 # Note that the KCC tech ref does not give a number for
2250 # the definition of "sites with many NC replicas". At a
2251 # bare minimum to satisfy n+2 edges directed at a node we
2252 # have to have at least three replicas in |R| (i.e. if n
2253 # is zero then at least replicas from two other graph
2254 # nodes may direct edges to us).
2255 if r_len >= 3 and not tnode.has_sufficient_edges():
2256 candidates = [x for x in graph_list if
2257 (x is not tnode and
2258 x.dsa_dnstr not in tnode.edge_from)]
2260 DEBUG_BLUE("looking for random link for %s. r_len %d, "
2261 "graph len %d candidates %d"
2262 % (tnode.dsa_dnstr, r_len, len(graph_list),
2263 len(candidates)))
2265 DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates])
2267 while candidates and not tnode.has_sufficient_edges():
2268 other = random.choice(candidates)
2269 DEBUG("trying to add candidate %s" % other.dsa_dstr)
2270 if not tnode.add_edge_from(other):
2271 DEBUG_RED("could not add %s" % other.dsa_dstr)
2272 candidates.remove(other)
2273 else:
2274 DEBUG_FN("not adding links to %s: nodes %s, links is %s/%s" %
2275 (tnode.dsa_dnstr, r_len, len(tnode.edge_from),
2276 tnode.max_edges))
2278 # Print the graph node in debug mode
2279 logger.debug("%s" % tnode)
2281 # For each edge directed to the local DC, ensure a nTDSConnection
2282 # points to us that satisfies the KCC criteria
2284 if tnode.dsa_dnstr == dc_local.dsa_dnstr:
2285 tnode.add_connections_from_edges(dc_local)
2287 if opts.verify or do_dot_files:
2288 dot_edges = []
2289 dot_vertices = set()
2290 for v1 in graph_list:
2291 dot_vertices.add(v1.dsa_dnstr)
2292 for v2 in v1.edge_from:
2293 dot_edges.append((v2, v1.dsa_dnstr))
2294 dot_vertices.add(v2)
2296 verify_properties = ('connected', 'directed_double_ring_or_small')
2297 verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices,
2298 label='%s__%s__%s' % (site_local.site_dnstr,
2299 nctype_lut[nc_x.nc_type],
2300 nc_x.nc_dnstr),
2301 properties=verify_properties, debug=DEBUG,
2302 verify=opts.verify,
2303 dot_files=do_dot_files, directed=True)
2305 def intrasite(self):
2306 """The head method for generating the intra-site KCC replica
2307 connection graph and attendant nTDSConnection objects
2308 in the samdb.
2309 """
2310 # Retrieve my DSA
2311 mydsa = self.my_dsa
2313 logger.debug("intrasite(): enter")
2315 # Test whether local site has topology disabled
2316 mysite = self.my_site
2317 if mysite.is_intrasite_topology_disabled():
2318 return
2320 detect_stale = (not mysite.is_detect_stale_disabled())
2321 for connect in mydsa.connect_table.values():
2322 if connect.to_be_added:
2323 DEBUG_CYAN("TO BE ADDED:\n%s" % connect)
2325 # Loop thru all the partitions, with gc_only False
2326 for partdn, part in self.part_table.items():
2327 self.construct_intrasite_graph(mysite, mydsa, part, False,
2328 detect_stale)
2329 for connect in mydsa.connect_table.values():
2330 if connect.to_be_added:
2331 DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
2333 # If the DC is a GC server, the KCC constructs an additional NC
2334 # replica graph (and creates nTDSConnection objects) for the
2335 # config NC as above, except that only NC replicas that "are present"
2336 # on GC servers are added to R.
2337 for connect in mydsa.connect_table.values():
2338 if connect.to_be_added:
2339 DEBUG_YELLOW("TO BE ADDED:\n%s" % connect)
2341 # Do it again, with gc_only True
2342 for partdn, part in self.part_table.items():
2343 if part.is_config():
2344 self.construct_intrasite_graph(mysite, mydsa, part, True,
2345 detect_stale)
2347 # The DC repeats the NC replica graph computation and nTDSConnection
2348 # creation for each of the NC replica graphs, this time assuming
2349 # that no DC has failed. It does so by re-executing the steps as
2350 # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
2351 # set in the options attribute of the site settings object for
2352 # the local DC's site. (i.e. we set the "detect_stale" flag to False)
2353 for connect in mydsa.connect_table.values():
2354 if connect.to_be_added:
2355 DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
2357 # Loop thru all the partitions.
2358 for partdn, part in self.part_table.items():
2359 self.construct_intrasite_graph(mysite, mydsa, part, False,
2360 False) # don't detect stale
2362 # If the DC is a GC server, the KCC constructs an additional NC
2363 # replica graph (and creates nTDSConnection objects) for the
2364 # config NC as above, except that only NC replicas that "are present"
2365 # on GC servers are added to R.
2366 for connect in mydsa.connect_table.values():
2367 if connect.to_be_added:
2368 DEBUG_RED("TO BE ADDED:\n%s" % connect)
2370 for partdn, part in self.part_table.items():
2371 if part.is_config():
2372 self.construct_intrasite_graph(mysite, mydsa, part, True,
2373 False) # don't detect stale
2375 if opts.readonly:
2376 # Display any connections to be deleted, modified or added
2377 for connect in mydsa.connect_table.values():
2378 if connect.to_be_deleted:
2379 logger.info("TO BE DELETED:\n%s" % connect)
2380 if connect.to_be_modified:
2381 logger.info("TO BE MODIFIED:\n%s" % connect)
2382 if connect.to_be_added:
2383 DEBUG_GREEN("TO BE ADDED:\n%s" % connect)
2385 mydsa.commit_connections(self.samdb, ro=True)
2386 else:
2387 # Commit any newly created connections to the samdb
2388 mydsa.commit_connections(self.samdb)
2390 def list_dsas(self):
2391 self.load_my_site()
2392 self.load_my_dsa()
2394 self.load_all_sites()
2395 self.load_all_partitions()
2396 self.load_all_transports()
2397 self.load_all_sitelinks()
2398 dsas = []
2399 for site in self.site_table.values():
2400 dsas.extend([dsa.dsa_dnstr.replace('CN=NTDS Settings,', '', 1)
2401 for dsa in site.dsa_table.values()])
2402 return dsas
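# For example (hypothetical DN), a DSA whose nTDSDSA object is
#     CN=NTDS Settings,CN=DC1,CN=Servers,CN=Site-A,CN=Sites,CN=Configuration,DC=example,DC=com
# is listed here as
#     CN=DC1,CN=Servers,CN=Site-A,CN=Sites,CN=Configuration,DC=example,DC=com
# which is the form accepted by the --forced-local-dsa option below.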
2404 def load_samdb(self, dburl, lp, creds):
2405 self.samdb = SamDB(url=dburl,
2406 session_info=system_session(),
2407 credentials=creds, lp=lp)
2409 def plot_all_connections(self, basename, verify_properties=()):
2410 verify = verify_properties and opts.verify
2411 plot = opts.dot_files
2412 if not (verify or plot):
2413 return
2415 dot_edges = []
2416 dot_vertices = []
2417 edge_colours = []
2418 vertex_colours = []
2420 for dsa in self.dsa_by_dnstr.values():
2421 dot_vertices.append(dsa.dsa_dnstr)
2422 if dsa.is_ro():
2423 vertex_colours.append('#cc0000')
2424 else:
2425 vertex_colours.append('#0000cc')
2426 for con in dsa.connect_table.values():
2427 if con.is_rodc_topology():
2428 edge_colours.append('red')
2429 else:
2430 edge_colours.append('blue')
2431 dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))
2433 verify_and_dot(basename, dot_edges, vertices=dot_vertices,
2434 label=self.my_dsa_dnstr, properties=verify_properties,
2435 debug=DEBUG, verify=verify, dot_files=plot,
2436 directed=True, edge_colors=edge_colours,
2437 vertex_colors=vertex_colours)
2439 def run(self, dburl, lp, creds, forced_local_dsa=None,
2440 forget_local_links=False, forget_intersite_links=False):
2441 """Method to perform a complete run of the KCC and
2442 produce an updated topology for subsequent NC replica
2443 synchronization between domain controllers.
2444 """
2445 # We may already have a samdb setup if we are
2446 # currently importing an ldif for a test run
2447 if self.samdb is None:
2448 try:
2449 self.load_samdb(dburl, lp, creds)
2450 except ldb.LdbError, (num, msg):
2451 logger.error("Unable to open sam database %s : %s" %
2452 (dburl, msg))
2453 return 1
2455 if forced_local_dsa:
2456 self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" %
2457 forced_local_dsa)
2459 try:
2460 # Setup
2461 self.load_my_site()
2462 self.load_my_dsa()
2464 self.load_all_sites()
2465 self.load_all_partitions()
2466 self.load_all_transports()
2467 self.load_all_sitelinks()
2469 if opts.verify or opts.dot_files:
2470 guid_to_dnstr = {}
2471 for site in self.site_table.values():
2472 guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
2473 for dnstr, dsa
2474 in site.dsa_table.items())
2476 self.plot_all_connections('dsa_initial')
2478 dot_edges = []
2479 current_reps, needed_reps = self.my_dsa.get_rep_tables()
2480 for dnstr, c_rep in current_reps.items():
2481 DEBUG("c_rep %s" % c_rep)
2482 dot_edges.append((self.my_dsa.dsa_dnstr, dnstr))
2484 verify_and_dot('dsa_repsFrom_initial', dot_edges,
2485 directed=True, label=self.my_dsa_dnstr,
2486 properties=(), debug=DEBUG, verify=opts.verify,
2487 dot_files=opts.dot_files)
2489 dot_edges = []
2490 for site in self.site_table.values():
2491 for dsa in site.dsa_table.values():
2492 current_reps, needed_reps = dsa.get_rep_tables()
2493 for dn_str, rep in current_reps.items():
2494 for reps_from in rep.rep_repsFrom:
2495 DEBUG("rep %s" % rep)
2496 dsa_guid = str(reps_from.source_dsa_obj_guid)
2497 dsa_dn = guid_to_dnstr[dsa_guid]
2498 dot_edges.append((dsa.dsa_dnstr, dsa_dn))
2500 verify_and_dot('dsa_repsFrom_initial_all', dot_edges,
2501 directed=True, label=self.my_dsa_dnstr,
2502 properties=(), debug=DEBUG, verify=opts.verify,
2503 dot_files=opts.dot_files)
2505 dot_edges = []
2506 for link in self.sitelink_table.values():
2507 for a, b in itertools.combinations(link.site_list, 2):
2508 dot_edges.append((str(a), str(b)))
2509 properties = ('connected',)
2510 verify_and_dot('dsa_sitelink_initial', dot_edges,
2511 directed=False,
2512 label=self.my_dsa_dnstr, properties=properties,
2513 debug=DEBUG, verify=opts.verify,
2514 dot_files=opts.dot_files)
2516 if forget_local_links:
2517 for dsa in self.my_site.dsa_table.values():
2518 dsa.connect_table = {k: v for k, v in
2519 dsa.connect_table.items()
2520 if v.is_rodc_topology()}
2521 self.plot_all_connections('dsa_forgotten_local')
2523 if forget_intersite_links:
2524 for site in self.site_table.values():
2525 for dsa in site.dsa_table.values():
2526 dsa.connect_table = {k: v for k, v in
2527 dsa.connect_table.items()
2528 if site is self.my_site and
2529 v.is_rodc_topology()}
2531 self.plot_all_connections('dsa_forgotten_all')
2532 # These are the published steps (in order) for the
2533 # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)
2535 # Step 1
2536 self.refresh_failed_links_connections()
2538 # Step 2
2539 self.intrasite()
2541 # Step 3
2542 all_connected = self.intersite()
2544 # Step 4
2545 self.remove_unneeded_ntdsconn(all_connected)
2547 # Step 5
2548 self.translate_ntdsconn()
2550 # Step 6
2551 self.remove_unneeded_failed_links_connections()
2553 # Step 7
2554 self.update_rodc_connection()
2556 if opts.verify or opts.dot_files:
2557 self.plot_all_connections('dsa_final',
2558 ('connected', 'forest_of_rings'))
2560 DEBUG_MAGENTA("there are %d dsa guids" % len(guid_to_dnstr))
2562 dot_edges = []
2563 edge_colors = []
2564 my_dnstr = self.my_dsa.dsa_dnstr
2565 current_reps, needed_reps = self.my_dsa.get_rep_tables()
2566 for dnstr, n_rep in needed_reps.items():
2567 for reps_from in n_rep.rep_repsFrom:
2568 guid_str = str(reps_from.source_dsa_obj_guid)
2569 dot_edges.append((my_dnstr, guid_to_dnstr[guid_str]))
2570 edge_colors.append('#' + str(n_rep.nc_guid)[:6])
2572 verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True,
2573 label=self.my_dsa_dnstr,
2574 properties=(), debug=DEBUG, verify=opts.verify,
2575 dot_files=opts.dot_files,
2576 edge_colors=edge_colors)
2578 dot_edges = []
2580 for site in self.site_table.values():
2581 for dsa in site.dsa_table.values():
2582 current_reps, needed_reps = dsa.get_rep_tables()
2583 for n_rep in needed_reps.values():
2584 for reps_from in n_rep.rep_repsFrom:
2585 dsa_guid = str(reps_from.source_dsa_obj_guid)
2586 dsa_dn = guid_to_dnstr[dsa_guid]
2587 dot_edges.append((dsa.dsa_dnstr, dsa_dn))
2589 verify_and_dot('dsa_repsFrom_final_all', dot_edges,
2590 directed=True, label=self.my_dsa_dnstr,
2591 properties=(), debug=DEBUG, verify=opts.verify,
2592 dot_files=opts.dot_files)
2594 except:
2595 raise
2597 return 0
2599 def import_ldif(self, dburl, lp, creds, ldif_file):
2600 """Import all objects and attributes that are relevent
2601 to the KCC algorithms from a previously exported LDIF file.
2603 The point of this function is to allow a programmer/debugger to
2604 import an LDIF file with non-security-relevant information that
2605 was previously extracted from a DC database. The LDIF file is used
2606 to create a temporary abbreviated database. The KCC algorithm can
2607 then run against this abbreviated database for debug or test
2608 verification that the topology generated is computationally the
2609 same between different OSes and algorithms.
2611 :param dburl: path to the temporary abbreviated db to create
2612 :param ldif_file: path to the ldif file to import
2613 """
2614 try:
2615 self.samdb = ldif_utils.ldif_to_samdb(dburl, lp, ldif_file,
2616 opts.forced_local_dsa)
2617 except ldif_utils.LdifError, e:
2618 print e
2619 return 1
2620 return 0
2622 def export_ldif(self, dburl, lp, creds, ldif_file):
2623 """Routine to extract all objects and attributes that are relevent
2624 to the KCC algorithms from a DC database.
2626 The point of this function is to allow a programmer/debugger to
2627 extract an LDIF file with non-security-relevant information from
2628 a DC database. The LDIF file can then be used to "import" via
2629 the import_ldif() function this file into a temporary abbreviated
2630 database. The KCC algorithm can then run against this abbreviated
2631 database for debug or test verification that the topology generated
2632 is computationally the same between different OSes and algorithms.
2634 :param dburl: LDAP database URL to extract info from
2635 :param ldif_file: output LDIF file name to create
2636 """
2637 try:
2638 ldif_utils.samdb_to_ldif_file(self.samdb, dburl, lp, creds,
2639 ldif_file)
2640 except ldif_utils.LdifError, e:
2641 print e
2642 return 1
2643 return 0
2645 ##################################################
2646 # Global Functions
2647 ##################################################
2650 def get_spanning_tree_edges(graph, my_site, label=None):
2651 # Phase 1: Run Dijkstra's to get a list of internal edges, which are
2652 # just the shortest-paths connecting colored vertices
2654 internal_edges = set()
2656 for e_set in graph.edge_set:
2657 edgeType = None
2658 for v in graph.vertices:
2659 v.edges = []
2661 # All edges in an edge set share the same con_type
2662 for e in e_set.edges:
2663 edgeType = e.con_type
2664 for v in e.vertices:
2665 v.edges.append(e)
2667 if opts.verify or opts.dot_files:
2668 graph_edges = [(a.site.site_dnstr, b.site.site_dnstr)
2669 for a, b in
2670 itertools.chain(
2671 *(itertools.combinations(edge.vertices, 2)
2672 for edge in e_set.edges))]
2673 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2675 if opts.dot_files and opts.debug:
2676 write_dot_file('edgeset_%s' % (edgeType,), graph_edges,
2677 vertices=graph_nodes, label=label)
2679 if opts.verify:
2680 verify_graph('spanning tree edge set %s' % edgeType,
2681 graph_edges, vertices=graph_nodes,
2682 properties=('complete', 'connected'),
2683 debug=DEBUG)
2685 # Run Dijkstra's algorithm with just the red vertices as seeds
2686 # Seed from the full replicas
2687 dijkstra(graph, edgeType, False)
2689 # Process edge set
2690 process_edge_set(graph, e_set, internal_edges)
2692 # Run Dijkstra's algorithm with red and black vertices as the seeds
2693 # Seed from both full and partial replicas
2694 dijkstra(graph, edgeType, True)
2696 # Process edge set
2697 process_edge_set(graph, e_set, internal_edges)
2699 # All vertices have root/component as itself
2700 setup_vertices(graph)
2701 process_edge_set(graph, None, internal_edges)
2703 if opts.verify or opts.dot_files:
2704 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
2705 for e in internal_edges]
2706 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2707 verify_properties = ('multi_edge_forest',)
2708 verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label,
2709 properties=verify_properties, debug=DEBUG,
2710 verify=opts.verify,
2711 dot_files=opts.dot_files)
2713 # Phase 2: Run Kruskal's on the internal edges
2714 output_edges, components = kruskal(graph, internal_edges)
2716 # This recalculates the cost for the path connecting the
2717 # closest red vertex. Ignoring types is fine because NO
2718 # suboptimal edge should exist in the graph
2719 dijkstra(graph, "EDGE_TYPE_ALL", False) # TODO rename
2720 # Phase 3: Process the output
2721 for v in graph.vertices:
2722 if v.is_red():
2723 v.dist_to_red = 0
2724 else:
2725 v.dist_to_red = v.repl_info.cost
2727 if opts.verify or opts.dot_files:
2728 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
2729 for e in internal_edges]
2730 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
2731 verify_properties = ('multi_edge_forest',)
2732 verify_and_dot('postkruskal', graph_edges, graph_nodes,
2733 label=label, properties=verify_properties,
2734 debug=DEBUG, verify=opts.verify,
2735 dot_files=opts.dot_files)
2737 # Ensure only one-way connections for partial-replicas,
2738 # and make sure they point the right way.
2739 edge_list = []
2740 for edge in output_edges:
2741 # We know these edges only have two endpoints because we made
2742 # them.
2743 v, w = edge.vertices
2744 if v.site is my_site or w.site is my_site:
2745 if (((v.is_black() or w.is_black()) and
2746 v.dist_to_red != MAX_DWORD)):
2747 edge.directed = True
2749 if w.dist_to_red < v.dist_to_red:
2750 edge.vertices[:] = w, v
2751 edge_list.append(edge)
2753 if opts.verify or opts.dot_files:
2754 graph_edges = [[x.site.site_dnstr for x in e.vertices]
2755 for e in edge_list]
2756 # Add the reverse edge if not directed.
2757 graph_edges.extend([x.site.site_dnstr
2758 for x in reversed(e.vertices)]
2759 for e in edge_list if not e.directed)
2760 graph_nodes = [x.site.site_dnstr for x in graph.vertices]
2761 verify_properties = ()
2762 verify_and_dot('post-one-way-partial', graph_edges, graph_nodes,
2763 label=label, properties=verify_properties,
2764 debug=DEBUG, verify=opts.verify,
2765 directed=True,
2766 dot_files=opts.dot_files)
2768 # count the components
2769 return edge_list, components
2772 def sort_replica_by_dsa_guid(rep1, rep2):
2773 """Helper to sort NCReplicas by their DSA guids
2775 The guids need to be sorted in their NDR form.
2777 :param rep1: An NC replica
2778 :param rep2: Another replica
2779 :return: -1, 0, or 1, indicating sort order.
2780 """
2781 return cmp(ndr_pack(rep1.rep_dsa_guid), ndr_pack(rep2.rep_dsa_guid))
2784 def sort_dsa_by_gc_and_guid(dsa1, dsa2):
2785 """Helper to sort DSAs by guid global catalog status
2787 GC DSAs come before non-GC DSAs, other than that, the guids are
2788 sorted in NDR form.
2790 :param dsa1: A DSA object
2791 :param dsa2: Another DSA
2792 :return: -1, 0, or 1, indicating sort order.
2793 """
2794 if dsa1.is_gc() and not dsa2.is_gc():
2795 return -1
2796 if not dsa1.is_gc() and dsa2.is_gc():
2797 return +1
2798 return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid))
2801 def is_smtp_replication_available():
2802 """Can the KCC use SMTP replication?
2804 Currently always returns false because Samba doesn't implement
2805 SMTP transfer for NC changes between DCs.
2807 :return: Boolean (always False)
2808 """
2809 return False
2812 def create_edge(con_type, site_link, guid_to_vertex):
2813 e = MultiEdge()
2814 e.site_link = site_link
2815 e.vertices = []
2816 for site_guid in site_link.site_list:
2817 if str(site_guid) in guid_to_vertex:
2818 e.vertices.extend(guid_to_vertex.get(str(site_guid)))
2819 e.repl_info.cost = site_link.cost
2820 e.repl_info.options = site_link.options
2821 e.repl_info.interval = site_link.interval
2822 e.repl_info.schedule = convert_schedule_to_repltimes(site_link.schedule)
2823 e.con_type = con_type
2824 e.directed = False
2825 return e
2828 def create_auto_edge_set(graph, transport):
2829 e_set = MultiEdgeSet()
2830 # use a NULL guid, not associated with a SiteLinkBridge object
2831 e_set.guid = misc.GUID()
2832 for site_link in graph.edges:
2833 if site_link.con_type == transport:
2834 e_set.edges.append(site_link)
2836 return e_set
2839 def create_edge_set(graph, transport, site_link_bridge):
2840 # TODO not implemented - need to store all site link bridges
2841 e_set = MultiEdgeSet()
2842 # e_set.guid = site_link_bridge
2843 return e_set
2846 def setup_vertices(graph):
2847 for v in graph.vertices:
2848 if v.is_white():
2849 v.repl_info.cost = MAX_DWORD
2850 v.root = None
2851 v.component_id = None
2852 else:
2853 v.repl_info.cost = 0
2854 v.root = v
2855 v.component_id = v
2857 v.repl_info.interval = 0
2858 v.repl_info.options = 0xFFFFFFFF
2859 v.repl_info.schedule = None # TODO highly suspicious
2860 v.demoted = False
2863 def dijkstra(graph, edge_type, include_black):
2864 queue = []
2865 setup_dijkstra(graph, edge_type, include_black, queue)
2866 while len(queue) > 0:
2867 cost, guid, vertex = heapq.heappop(queue)
2868 for edge in vertex.edges:
2869 for v in edge.vertices:
2870 if v is not vertex:
2871 # add new path from vertex to v
2872 try_new_path(graph, queue, vertex, edge, v)
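# A minimal sketch of the priority queue contents (comments only;
# guid_a, vertex_a etc. are placeholder names): every entry is a
# (cost, guid, vertex) tuple, so heapq pops the cheapest vertex first:
#     heapq.heappush(queue, (100, guid_a, vertex_a))
#     heapq.heappush(queue, (50, guid_b, vertex_b))
#     heapq.heappop(queue)      # -> the cost-50 entry
# The guid field settles ties between equal costs before the vertex
# objects themselves would ever be compared.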
2875 def setup_dijkstra(graph, edge_type, include_black, queue):
2876 setup_vertices(graph)
2877 for vertex in graph.vertices:
2878 if vertex.is_white():
2879 continue
2881 if (((vertex.is_black() and not include_black)
2882 or edge_type not in vertex.accept_black
2883 or edge_type not in vertex.accept_red_red)):
2884 vertex.repl_info.cost = MAX_DWORD
2885 vertex.root = None # NULL GUID
2886 vertex.demoted = True # Demoted appears not to be used
2887 else:
2888 heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
2891 def try_new_path(graph, queue, vfrom, edge, vto):
2892 newRI = ReplInfo()
2893 # What this function checks is that there is a valid time frame for
2894 # which replication can actually occur, despite being adequately
2895 # connected
2896 intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI)
2898 # If the new path costs more than the current, then ignore the edge
2899 if newRI.cost > vto.repl_info.cost:
2900 return
2902 if newRI.cost < vto.repl_info.cost and not intersect:
2903 return
2905 new_duration = total_schedule(newRI.schedule)
2906 old_duration = total_schedule(vto.repl_info.schedule)
2908 # Cheaper or longer schedule
2909 if newRI.cost < vto.repl_info.cost or new_duration > old_duration:
2910 vto.root = vfrom.root
2911 vto.component_id = vfrom.component_id
2912 vto.repl_info = newRI
2913 heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
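# Decision summary for a candidate path (comments only; the costs are
# made-up numbers):
#     new 200 vs current 100                      -> rejected (more expensive)
#     new  50 vs current 100, schedules disjoint  -> rejected (would never replicate)
#     new  50 vs current 100, schedules overlap   -> accepted (cheaper)
#     new 100 vs current 100, longer schedule     -> accepted (same cost, more time)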
2916 def check_demote_vertex(vertex, edge_type):
2917 if vertex.is_white():
2918 return
2920 # Accepts neither red-red nor black edges, demote
2921 if ((edge_type not in vertex.accept_black and
2922 edge_type not in vertex.accept_red_red)):
2923 vertex.repl_info.cost = MAX_DWORD
2924 vertex.root = None
2925 vertex.demoted = True # Demoted appears not to be used
2928 def undemote_vertex(vertex):
2929 if vertex.is_white():
2930 return
2932 vertex.repl_info.cost = 0
2933 vertex.root = vertex
2934 vertex.demoted = False
2937 def process_edge_set(graph, e_set, internal_edges):
2938 if e_set is None:
2939 for edge in graph.edges:
2940 for vertex in edge.vertices:
2941 check_demote_vertex(vertex, edge.con_type)
2942 process_edge(graph, edge, internal_edges)
2943 for vertex in edge.vertices:
2944 undemote_vertex(vertex)
2945 else:
2946 for edge in e_set.edges:
2947 process_edge(graph, edge, internal_edges)
2950 def process_edge(graph, examine, internal_edges):
2951 # Find the set of all vertices that touch the edge to examine
2952 vertices = []
2953 for v in examine.vertices:
2954 # Append a 4-tuple of color, repl cost, guid and vertex
2955 vertices.append((v.color, v.repl_info.cost, v.ndrpacked_guid, v))
2956 # Sort by color first, then cost, then NDR-packed guid (ascending)
2957 DEBUG("vertices is %s" % vertices)
2958 vertices.sort()
2960 color, cost, guid, bestv = vertices[0]
2961 # Add to internal edges an edge from every colored vertex to bestV
2962 for v in examine.vertices:
2963 if v.component_id is None or v.root is None:
2964 continue
2966 # Only add edge if valid inter-tree edge - needs a root and
2967 # different components
2968 if ((bestv.component_id is not None and
2969 bestv.root is not None and
2970 v.component_id is not None and
2971 v.root is not None and
2972 bestv.component_id != v.component_id)):
2973 add_int_edge(graph, internal_edges, examine, bestv, v)
2976 # Add an internal edge. The endpoints are the roots of the vertices
2977 # passed in and are always colored
2978 def add_int_edge(graph, internal_edges, examine, v1, v2):
2979 root1 = v1.root
2980 root2 = v2.root
2982 red_red = False
2983 if root1.is_red() and root2.is_red():
2984 red_red = True
2986 if red_red:
2987 if ((examine.con_type not in root1.accept_red_red
2988 or examine.con_type not in root2.accept_red_red)):
2989 return
2990 elif (examine.con_type not in root1.accept_black
2991 or examine.con_type not in root2.accept_black):
2992 return
2994 ri = ReplInfo()
2995 ri2 = ReplInfo()
2997 # Create the transitive replInfo for the two trees and this edge
2998 if not combine_repl_info(v1.repl_info, v2.repl_info, ri):
2999 return
3000 # ri is now initialized
3001 if not combine_repl_info(ri, examine.repl_info, ri2):
3002 return
3004 newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type,
3005 examine.site_link)
3006 # Order by vertex guid
3007 #XXX guid comparison using ndr_pack
3008 if newIntEdge.v1.ndrpacked_guid > newIntEdge.v2.ndrpacked_guid:
3009 newIntEdge.v1 = root2
3010 newIntEdge.v2 = root1
3012 internal_edges.add(newIntEdge)
3015 def kruskal(graph, edges):
3016 for v in graph.vertices:
3017 v.edges = []
3019 components = set([x for x in graph.vertices if not x.is_white()])
3020 edges = list(edges)
3022 # Sorted based on internal comparison function of internal edge
3023 edges.sort()
3025 #XXX expected_num_tree_edges is never used
3026 expected_num_tree_edges = 0 # TODO this value makes little sense
3028 count_edges = 0
3029 output_edges = []
3030 index = 0
3031 while index < len(edges): # TODO and num_components > 1
3032 e = edges[index]
3033 parent1 = find_component(e.v1)
3034 parent2 = find_component(e.v2)
3035 if parent1 is not parent2:
3036 count_edges += 1
3037 add_out_edge(graph, output_edges, e)
3038 parent1.component_id = parent2
3039 components.discard(parent1)
3041 index += 1
3043 return output_edges, len(components)
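# Worked example (comments only, invented costs): four colored sites
# A, B, C and D with candidate internal edges sorted as A-B(1), C-D(2),
# B-C(3), A-D(4).  A-B, C-D and B-C each join two different components
# and are kept; A-D is skipped because A and D are already connected,
# so kruskal() returns three edges and a component count of 1.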
3046 def find_component(vertex):
3047 if vertex.component_id is vertex:
3048 return vertex
3050 current = vertex
3051 while current.component_id is not current:
3052 current = current.component_id
3054 root = current
3055 current = vertex
3056 while current.component_id is not root:
3057 n = current.component_id
3058 current.component_id = root
3059 current = n
3061 return root
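# Path-compression sketch (comments only): given a chain
#     v.component_id -> a, a.component_id -> b, b.component_id -> c,
#     c.component_id -> c
# find_component(v) returns c and rewrites v and a to point straight
# at c, so later look-ups resolve in one step.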
3064 def add_out_edge(graph, output_edges, e):
3065 v1 = e.v1
3066 v2 = e.v2
3068 # This multi-edge is a 'real' edge with no GUID
3069 ee = MultiEdge()
3070 ee.directed = False
3071 ee.site_link = e.site_link
3072 ee.vertices.append(v1)
3073 ee.vertices.append(v2)
3074 ee.con_type = e.e_type
3075 ee.repl_info = e.repl_info
3076 output_edges.append(ee)
3078 v1.edges.append(ee)
3079 v2.edges.append(ee)
3082 def test_all_reps_from(lp, creds, rng_seed=None):
3083 kcc = KCC()
3084 kcc.load_samdb(opts.dburl, lp, creds)
3085 dsas = kcc.list_dsas()
3086 needed_parts = {}
3087 current_parts = {}
3089 guid_to_dnstr = {}
3090 for site in kcc.site_table.values():
3091 guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
3092 for dnstr, dsa in site.dsa_table.items())
3094 dot_edges = []
3095 dot_vertices = []
3096 colours = []
3097 vertex_colours = []
3099 for dsa_dn in dsas:
3100 if rng_seed:
3101 random.seed(rng_seed)
3102 kcc = KCC()
3103 kcc.run(opts.dburl, lp, creds, forced_local_dsa=dsa_dn,
3104 forget_local_links=opts.forget_local_links,
3105 forget_intersite_links=opts.forget_intersite_links)
3107 current, needed = kcc.my_dsa.get_rep_tables()
3109 for dsa in kcc.my_site.dsa_table.values():
3110 if dsa is kcc.my_dsa:
3111 continue
3112 kcc.translate_ntdsconn(dsa)
3113 c, n = dsa.get_rep_tables()
3114 current.update(c)
3115 needed.update(n)
3117 for name, rep_table, rep_parts in (
3118 ('needed', needed, needed_parts),
3119 ('current', current, current_parts)):
3120 for part, nc_rep in rep_table.items():
3121 edges = rep_parts.setdefault(part, [])
3122 for reps_from in nc_rep.rep_repsFrom:
3123 source = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
3124 dest = guid_to_dnstr[str(nc_rep.rep_dsa_guid)]
3125 edges.append((source, dest))
3127 for site in kcc.site_table.values():
3128 for dsa in site.dsa_table.values():
3129 if dsa.is_ro():
3130 vertex_colours.append('#cc0000')
3131 else:
3132 vertex_colours.append('#0000cc')
3133 dot_vertices.append(dsa.dsa_dnstr)
3134 if dsa.connect_table:
3135 DEBUG_FN("DSA %s %s connections:\n%s" %
3136 (dsa.dsa_dnstr, len(dsa.connect_table),
3137 [x.from_dnstr for x in
3138 dsa.connect_table.values()]))
3139 for con in dsa.connect_table.values():
3140 if con.is_rodc_topology():
3141 colours.append('red')
3142 else:
3143 colours.append('blue')
3144 dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))
3146 verify_and_dot('all-dsa-connections', dot_edges, vertices=dot_vertices,
3147 label="all dsa NTDSConnections", properties=(),
3148 debug=DEBUG, verify=opts.verify, dot_files=opts.dot_files,
3149 directed=True, edge_colors=colours,
3150 vertex_colors=vertex_colours)
3152 for name, rep_parts in (('needed', needed_parts),
3153 ('current', current_parts)):
3154 for part, edges in rep_parts.items():
3155 verify_and_dot('all-repsFrom_%s__%s' % (name, part), edges,
3156 directed=True, label=part,
3157 properties=(), debug=DEBUG, verify=opts.verify,
3158 dot_files=opts.dot_files)
3161 logger = logging.getLogger("samba_kcc")
3162 logger.addHandler(logging.StreamHandler(sys.stdout))
3163 DEBUG = logger.debug
3166 def _color_debug(*args, **kwargs):
3167 DEBUG('%s%s%s' % (kwargs['color'], args[0], C_NORMAL), *args[1:])
3169 _globals = globals()
3170 for _color in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW',
3171 'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA',
3172 'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'):
3173 _globals['DEBUG_' + _color] = partial(_color_debug, color=_globals[_color])
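# After this loop (a sketch, assuming the colour constants such as RED
# and C_NORMAL come from the star imports at the top of the file), a
# call like
#     DEBUG_RED("boom")
# is equivalent to
#     DEBUG(RED + "boom" + C_NORMAL)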
3176 def DEBUG_FN(msg=''):
3177 import traceback
3178 filename, lineno, function, text = traceback.extract_stack(None, 2)[0]
3179 DEBUG("%s%s:%s%s %s%s()%s '%s'" % (CYAN, filename, BLUE, lineno,
3180 CYAN, function, C_NORMAL, msg))
3183 ##################################################
3184 # samba_kcc entry point
3185 ##################################################
3187 parser = optparse.OptionParser("samba_kcc [options]")
3188 sambaopts = options.SambaOptions(parser)
3189 credopts = options.CredentialsOptions(parser)
3191 parser.add_option_group(sambaopts)
3192 parser.add_option_group(credopts)
3193 parser.add_option_group(options.VersionOptions(parser))
3195 parser.add_option("--readonly", default=False,
3196 help="compute topology but do not update database",
3197 action="store_true")
3199 parser.add_option("--debug",
3200 help="debug output",
3201 action="store_true")
3203 parser.add_option("--verify",
3204 help="verify that assorted invariants are kept",
3205 action="store_true")
3207 parser.add_option("--list-verify-tests",
3208 help=("list what verification actions are available "
3209 "and do nothing else"),
3210 action="store_true")
3212 parser.add_option("--no-dot-files", dest='dot_files',
3213 help="Don't write dot graph files in /tmp",
3214 default=True, action="store_false")
3216 parser.add_option("--seed",
3217 help="random number seed",
3218 type=int)
3220 parser.add_option("--importldif",
3221 help="import topology ldif file",
3222 type=str, metavar="<file>")
3224 parser.add_option("--exportldif",
3225 help="export topology ldif file",
3226 type=str, metavar="<file>")
3228 parser.add_option("-H", "--URL",
3229 help="LDB URL for database or target server",
3230 type=str, metavar="<URL>", dest="dburl")
3232 parser.add_option("--tmpdb",
3233 help="schemaless database file to create for ldif import",
3234 type=str, metavar="<file>")
3236 parser.add_option("--now",
3237 help=("assume current time is this ('YYYYmmddHHMMSS[tz]',"
3238 " default: system time)"),
3239 type=str, metavar="<date>")
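# For example (hypothetical invocations), either of these is accepted
# by the parsing loop further down:
#     samba_kcc --now 20120101000000GMT
#     samba_kcc --now 20120101000000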
3241 parser.add_option("--forced-local-dsa",
3242 help="run calculations assuming the DSA is this DN",
3243 type=str, metavar="<DSA>")
3245 parser.add_option("--attempt-live-connections", default=False,
3246 help="Attempt to connect to other DSAs to test links",
3247 action="store_true")
3249 parser.add_option("--list-valid-dsas", default=False,
3250 help=("Print a list of DSA dnstrs that could be"
3251 " used in --forced-local-dsa"),
3252 action="store_true")
3254 parser.add_option("--test-all-reps-from", default=False,
3255 help="Create and verify a graph of reps-from for every DSA",
3256 action="store_true")
3258 parser.add_option("--forget-local-links", default=False,
3259 help="pretend not to know the existing local topology",
3260 action="store_true")
3262 parser.add_option("--forget-intersite-links", default=False,
3263 help="pretend not to know the existing intersite topology",
3264 action="store_true")
3267 opts, args = parser.parse_args()
3270 if opts.list_verify_tests:
3271 list_verify_tests()
3272 sys.exit(0)
3274 if opts.debug:
3275 logger.setLevel(logging.DEBUG)
3276 elif opts.readonly:
3277 logger.setLevel(logging.INFO)
3278 else:
3279 logger.setLevel(logging.WARNING)
3281 # initialize seed from optional input parameter
3282 if opts.seed:
3283 random.seed(opts.seed)
3284 else:
3285 random.seed(0xACE5CA11)
3287 if opts.now:
3288 for timeformat in ("%Y%m%d%H%M%S%Z", "%Y%m%d%H%M%S"):
3289 try:
3290 now_tuple = time.strptime(opts.now, timeformat)
3291 break
3292 except ValueError:
3293 pass
3294 else:
3295 # the for/else clause runs only if the loop never hit break --> no format matched
3296 print >> sys.stderr, "could not parse time '%s'" % opts.now
3297 sys.exit(1)
3299 unix_now = int(time.mktime(now_tuple))
3300 else:
3301 unix_now = int(time.time())
3303 nt_now = unix2nttime(unix_now)
3305 lp = sambaopts.get_loadparm()
3306 creds = credopts.get_credentials(lp, fallback_machine=True)
3308 if opts.dburl is None:
3309 opts.dburl = lp.samdb_url()
3311 if opts.test_all_reps_from:
3312 opts.readonly = True
3313 rng_seed = opts.seed or 0xACE5CA11
3314 test_all_reps_from(lp, creds, rng_seed=rng_seed)
3315 sys.exit()
3317 # Instantiate Knowledge Consistency Checker and perform run
3318 kcc = KCC()
3320 if opts.exportldif:
3321 rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)
3322 sys.exit(rc)
3324 if opts.importldif:
3325 if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
3326 logger.error("Specify a target temp database file with --tmpdb option")
3327 sys.exit(1)
3329 rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)
3330 if rc != 0:
3331 sys.exit(rc)
3333 if opts.list_valid_dsas:
3334 kcc.load_samdb(opts.dburl, lp, creds)
3335 print '\n'.join(kcc.list_dsas())
3336 sys.exit()
3338 try:
3339 rc = kcc.run(opts.dburl, lp, creds, opts.forced_local_dsa,
3340 opts.forget_local_links, opts.forget_intersite_links)
3341 sys.exit(rc)
3343 except GraphError, e:
3344 print e
3345 sys.exit(1)