KCC: sitelink graph is undirected
[Samba.git] / source4 / scripting / bin / samba_kcc
blob 125bc11ed86fba2f4e91b12ef2538531ecda359b
1 #!/usr/bin/env python
3 # Compute our KCC topology
5 # Copyright (C) Dave Craft 2011
6 # Copyright (C) Andrew Bartlett 2015
8 # Andrew Bartlett's alleged work performed by his underlings Douglas
9 # Bagnall and Garming Sam.
11 # This program is free software; you can redistribute it and/or modify
12 # it under the terms of the GNU General Public License as published by
13 # the Free Software Foundation; either version 3 of the License, or
14 # (at your option) any later version.
16 # This program is distributed in the hope that it will be useful,
17 # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 # GNU General Public License for more details.
21 # You should have received a copy of the GNU General Public License
22 # along with this program. If not, see <http://www.gnu.org/licenses/>.
24 import os
25 import sys
26 import random
28 # ensure we get messages out immediately, so they get in the samba logs,
29 # and don't get swallowed by a timeout
30 os.environ['PYTHONUNBUFFERED'] = '1'
32 # forcing GMT avoids a problem in some timezones with kerberos. Both MIT
33 # and Heimdal can get mutual authentication errors due to the 24 second difference
34 # between UTC and GMT when using some zone files (eg. the PDT zone from
35 # the US)
36 os.environ["TZ"] = "GMT"
38 # Find right directory when running from source tree
39 sys.path.insert(0, "bin/python")
41 import optparse
42 import logging
43 import itertools
44 import heapq
45 import time
47 from samba import (
48 getopt as options,
49 Ldb,
50 ldb,
51 dsdb,
52 read_and_sub_file,
53 drs_utils,
54 nttime2unix)
55 from samba.auth import system_session
56 from samba.samdb import SamDB
57 from samba.dcerpc import drsuapi
58 from samba.kcc_utils import *
60 class KCC(object):
61 """The Knowledge Consistency Checker class.
63 A container for objects and methods allowing a run of the KCC. Produces a
64 set of connections in the samdb that the Distributed Replication
65 Service can then use to replicate naming contexts.
66 """
67 def __init__(self):
68 """Initializes the partitions class which can hold
69 our local DC's partitions or all the partitions in
70 the forest
71 """
72 self.part_table = {} # partition objects
73 self.site_table = {}
74 self.transport_table = {}
75 self.sitelink_table = {}
77 # TODO: These should be backed by a 'permanent' store so that when
78 # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
79 # the failure information can be returned
80 self.kcc_failed_links = {}
81 self.kcc_failed_connections = set()
83 # Used in inter-site topology computation. A list
84 # of connections (by NTDSConnection object) that are
85 # to be kept when pruning un-needed NTDS Connections
86 self.keep_connection_list = []
88 self.my_dsa_dnstr = None # My dsa DN
89 self.my_dsa = None # My dsa object
91 self.my_site_dnstr = None
92 self.my_site_guid = None
93 self.my_site = None
95 self.samdb = None
97 def load_all_transports(self):
98 """Loads the inter-site transport objects for Sites
100 ::returns: Raises an Exception on error
101 """
102 try:
103 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
104 self.samdb.get_config_basedn(),
105 scope=ldb.SCOPE_SUBTREE,
106 expression="(objectClass=interSiteTransport)")
107 except ldb.LdbError, (enum, estr):
108 raise Exception("Unable to find inter-site transports - (%s)" %
109 estr)
111 for msg in res:
112 dnstr = str(msg.dn)
114 transport = Transport(dnstr)
116 transport.load_transport(self.samdb)
118 # already loaded
119 if str(transport.guid) in self.transport_table.keys():
120 continue
122 # Assign this transport to table
123 # and index by guid
124 self.transport_table[str(transport.guid)] = transport
126 def load_all_sitelinks(self):
127 """Loads the inter-site siteLink objects
129 ::returns: Raises an Exception on error
130 """
131 try:
132 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
133 self.samdb.get_config_basedn(),
134 scope=ldb.SCOPE_SUBTREE,
135 expression="(objectClass=siteLink)")
136 except ldb.LdbError, (enum, estr):
137 raise Exception("Unable to find inter-site siteLinks - (%s)" % estr)
139 for msg in res:
140 dnstr = str(msg.dn)
142 # already loaded
143 if dnstr in self.sitelink_table.keys():
144 continue
146 sitelink = SiteLink(dnstr)
148 sitelink.load_sitelink(self.samdb)
150 # Assign this siteLink to table
151 # and index by dn
152 self.sitelink_table[dnstr] = sitelink
154 def load_my_site(self):
155 """Loads the Site class for the local DSA
157 ::returns: Raises an Exception on error
158 """
159 self.my_site_dnstr = "CN=%s,CN=Sites,%s" % (
160 self.samdb.server_site_name(),
161 self.samdb.get_config_basedn())
162 site = Site(self.my_site_dnstr, unix_now)
163 site.load_site(self.samdb)
165 self.site_table[str(site.site_guid)] = site
166 self.my_site_guid = site.site_guid
167 self.my_site = site
169 def load_all_sites(self):
170 """Discover all sites and instantiate and load each
171 NTDS Site settings.
173 ::returns: Raises an Exception on error
174 """
175 try:
176 res = self.samdb.search("CN=Sites,%s" %
177 self.samdb.get_config_basedn(),
178 scope=ldb.SCOPE_SUBTREE,
179 expression="(objectClass=site)")
180 except ldb.LdbError, (enum, estr):
181 raise Exception("Unable to find sites - (%s)" % estr)
183 for msg in res:
184 sitestr = str(msg.dn)
186 site = Site(sitestr, unix_now)
187 site.load_site(self.samdb)
189 # already loaded
190 if str(site.site_guid) in self.site_table.keys():
191 continue
193 self.site_table[str(site.site_guid)] = site
195 def load_my_dsa(self):
196 """Discover my nTDSDSA dn thru the rootDSE entry
198 ::returns: Raises an Exception on error.
199 """
200 dn = ldb.Dn(self.samdb, "<GUID=%s>" % self.samdb.get_ntds_GUID())
201 try:
202 res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
203 attrs=["objectGUID"])
204 except ldb.LdbError, (enum, estr):
205 DEBUG("Search for %s failed: %s. This typically happens in"
206 " --importldif mode due to lack of module support",
207 dn, estr)
208 try:
209 # We work around the failure above by looking at the
210 # dsServiceName that was put in the fake rootdse by
211 # the --exportldif, rather than the
212 # samdb.get_ntds_GUID(). The disadvantage is that this
213 # mode requires we modify the @ROOTDSE dn to support
214 # --forced-local-dsa
215 service_name_res = self.samdb.search(base="", scope=ldb.SCOPE_BASE,
216 attrs=["dsServiceName"])
217 dn = ldb.Dn(self.samdb, service_name_res[0]["dsServiceName"][0])
219 res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
220 attrs=["objectGUID"])
221 except ldb.LdbError, (enum, estr):
222 raise Exception("Unable to find my nTDSDSA - (%s)" % estr)
224 if len(res) != 1:
225 raise Exception("Unable to find my nTDSDSA at %s" % dn.extended_str())
227 if misc.GUID(res[0]["objectGUID"][0]) != misc.GUID(self.samdb.get_ntds_GUID()):
228 raise Exception("Did not find the GUID we expected, perhaps due to --importldif")
230 self.my_dsa_dnstr = str(res[0].dn)
232 self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)
234 def load_all_partitions(self):
235 """Discover all NCs thru the Partitions dn and
236 instantiate and load the NCs.
238 Each NC is inserted into the part_table by partition
239 dn string (not the nCName dn string)
241 ::returns: Raises an Exception on error
242 """
243 try:
244 res = self.samdb.search("CN=Partitions,%s" %
245 self.samdb.get_config_basedn(),
246 scope=ldb.SCOPE_SUBTREE,
247 expression="(objectClass=crossRef)")
248 except ldb.LdbError, (enum, estr):
249 raise Exception("Unable to find partitions - (%s)" % estr)
251 for msg in res:
252 partstr = str(msg.dn)
254 # already loaded
255 if partstr in self.part_table.keys():
256 continue
258 part = Partition(partstr)
260 part.load_partition(self.samdb)
261 self.part_table[partstr] = part
263 def should_be_present_test(self):
264 """Enumerate all loaded partitions and DSAs in local
265 site and test if NC should be present as replica
266 """
267 for partdn, part in self.part_table.items():
268 for dsadn, dsa in self.my_site.dsa_table.items():
269 needed, ro, partial = part.should_be_present(dsa)
270 logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
271 (dsadn, part.nc_dnstr, needed, ro, partial))
273 def refresh_failed_links_connections(self):
274 """Instead of NULL link with failure_count = 0, the tuple is simply removed"""
276 # LINKS: Refresh failed links
277 self.kcc_failed_links = {}
278 current, needed = self.my_dsa.get_rep_tables()
279 for replica in current.values():
280 # For every possible connection to replicate
281 for reps_from in replica.rep_repsFrom:
282 failure_count = reps_from.consecutive_sync_failures
283 if failure_count <= 0:
284 continue
286 dsa_guid = str(reps_from.source_dsa_obj_guid)
287 time_first_failure = reps_from.last_success
288 last_result = reps_from.last_attempt
289 dns_name = reps_from.dns_name1
291 f = self.kcc_failed_links.get(dsa_guid)
292 if not f:
293 f = KCCFailedObject(dsa_guid, failure_count,
294 time_first_failure, last_result,
295 dns_name)
296 self.kcc_failed_links[dsa_guid] = f
297 #elif f.failure_count == 0:
298 # f.failure_count = failure_count
299 # f.time_first_failure = time_first_failure
300 # f.last_result = last_result
301 else:
302 f.failure_count = max(f.failure_count, failure_count)
303 f.time_first_failure = min(f.time_first_failure, time_first_failure)
304 f.last_result = last_result
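# Rough illustration of the merge above (values invented): two repsFrom
# entries for the same source DSA with consecutive_sync_failures of 3 and
# 5 collapse into a single KCCFailedObject whose failure_count becomes
# max(3, 5) = 5 and whose time_first_failure is the earlier of the two
# timestamps; last_result simply keeps the most recently seen value.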
306 # CONNECTIONS: Refresh failed connections
307 restore_connections = set()
308 for connection in self.kcc_failed_connections:
309 try:
310 drs_utils.drsuapi_connect(connection.dns_name, lp, creds)
311 # Failed connection is no longer failing
312 restore_connections.add(connection)
313 except drs_utils.drsException:
314 # Failed connection still failing
315 connection.failure_count += 1
317 # Remove the restored connections from the failed connections
318 self.kcc_failed_connections.difference_update(restore_connections)
320 def is_stale_link_connection(self, target_dsa):
321 """Returns False if no tuple z exists in the kCCFailedLinks or
322 kCCFailedConnections variables such that z.UUIDDsa is the
323 objectGUID of the target dsa, z.FailureCount > 0, and
324 the current time - z.TimeFirstFailure > 2 hours.
325 """
326 # Returns True if tuple z exists...
327 failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
328 if failed_link:
329 # failure_count should be > 0, but check anyways
330 if failed_link.failure_count > 0:
331 unix_first_time_failure = nttime2unix(failed_link.time_first_failure)
332 # TODO guard against future
333 if unix_first_time_failure > unix_now:
334 logger.error("The last success time attribute for \
335 repsFrom is in the future!")
337 # Perform calculation in seconds
338 if (unix_now - unix_first_time_failure) > 60 * 60 * 2:
339 return True
341 # TODO connections
343 return False
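# Rough worked example of the 2-hour rule above (times invented): if the
# first failure converts to a unix time 7201 seconds before unix_now,
# then unix_now - unix_first_time_failure = 7201 > 60 * 60 * 2 = 7200,
# so the link connection is treated as stale and True is returned.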
345 # TODO: This should be backed by some form of local database
346 def remove_unneeded_failed_links_connections(self):
347 # Remove all tuples in kcc_failed_links where failure count = 0
348 # In this implementation, this should never happen.
350 # Remove all connections which were not used this run or connections
351 # that became active during this run.
352 pass
354 def remove_unneeded_ntdsconn(self, all_connected):
355 """Removes unneeded NTDS Connections after computation
356 of KCC intra and inter-site topology has finished.
357 """
358 mydsa = self.my_dsa
360 # Loop thru connections
361 for cn_dnstr, cn_conn in mydsa.connect_table.items():
363 s_dnstr = cn_conn.get_from_dnstr()
364 if s_dnstr is None:
365 cn_conn.to_be_deleted = True
366 continue
368 # Get the source DSA no matter what site
369 s_dsa = self.get_dsa(s_dnstr)
371 # Check if the DSA is in our site
372 if self.my_site.same_site(s_dsa):
373 same_site = True
374 else:
375 same_site = False
377 # Given an nTDSConnection object cn, if the DC with the
378 # nTDSDSA object dc that is the parent object of cn and
379 # the DC with the nTDSDSA object referenced by cn!fromServer
380 # are in the same site, the KCC on dc deletes cn if all of
381 # the following are true:
383 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
385 # No site settings object s exists for the local DC's site, or
386 # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
387 # s!options.
389 # Another nTDSConnection object cn2 exists such that cn and
390 # cn2 have the same parent object, cn!fromServer = cn2!fromServer,
391 # and either
393 # cn!whenCreated < cn2!whenCreated
395 # cn!whenCreated = cn2!whenCreated and
396 # cn!objectGUID < cn2!objectGUID
398 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
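# Rough illustration of the tiebreak below (values invented): for two
# generated connections with the same fromServer, the one with the
# earlier whenCreated is deleted; if whenCreated is equal, the one whose
# NDR-packed objectGUID compares lower is deleted. So of connections
# created at 20110101000000.0Z and 20110102000000.0Z, the first is the
# candidate for deletion.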
399 if same_site:
400 if not cn_conn.is_generated():
401 continue
403 if self.my_site.is_cleanup_ntdsconn_disabled():
404 continue
406 # Loop thru connections looking for a duplicate that
407 # fulfills the previous criteria
408 lesser = False
410 for cn2_dnstr, cn2_conn in mydsa.connect_table.items():
411 if cn2_conn is cn_conn:
412 continue
414 s2_dnstr = cn2_conn.get_from_dnstr()
415 if s2_dnstr is None:
416 continue
418 # If the NTDS Connection has a different
419 # fromServer field then no match
420 if s2_dnstr != s_dnstr:
421 continue
423 #XXX GUID comparison
424 lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or
425 (cn_conn.whenCreated == cn2_conn.whenCreated and
426 ndr_pack(cn_conn.guid) < ndr_pack(cn2_conn.guid)))
428 if lesser:
429 break
431 if lesser and not cn_conn.is_rodc_topology():
432 cn_conn.to_be_deleted = True
434 # Given an nTDSConnection object cn, if the DC with the nTDSDSA
435 # object dc that is the parent object of cn and the DC with
436 # the nTDSDSA object referenced by cn!fromServer are in
437 # different sites, a KCC acting as an ISTG in dc's site
438 # deletes cn if all of the following are true:
440 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
442 # cn!fromServer references an nTDSDSA object for a DC
443 # in a site other than the local DC's site.
445 # The keepConnections sequence returned by
446 # CreateIntersiteConnections() does not contain
447 # cn!objectGUID, or cn is "superseded by" (see below)
448 # another nTDSConnection cn2 and keepConnections
449 # contains cn2!objectGUID.
451 # The return value of CreateIntersiteConnections()
452 # was true.
454 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
455 # cn!options
457 else: # different site
459 if not mydsa.is_istg():
460 continue
462 if not cn_conn.is_generated():
463 continue
465 # TODO
466 # We are directly using this connection in intersite or
467 # we are using a connection which can supersede this one.
469 # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
470 # appear to be correct.
472 # 1. cn!fromServer and cn!parent appear inconsistent with no cn2
473 # 2. The repsFrom do not imply each other
475 if self.keep_connection(cn_conn): # and not_superceded:
476 continue
478 # This is the result of create_intersite_connections
479 if not all_connected:
480 continue
482 if not cn_conn.is_rodc_topology():
483 cn_conn.to_be_deleted = True
486 if mydsa.is_ro() or opts.readonly:
487 for dnstr, connect in mydsa.connect_table.items():
488 if connect.to_be_deleted:
489 DEBUG_GREEN("TO BE DELETED:\n%s" % connect)
490 if connect.to_be_added:
491 DEBUG_GREEN("TO BE ADDED:\n%s" % connect)
493 # Perform deletion from our tables but perform
494 # no database modification
495 mydsa.commit_connections(self.samdb, ro=True)
496 else:
497 # Commit any modified connections
498 mydsa.commit_connections(self.samdb)
500 def get_dsa_by_guidstr(self, guidstr):
501 """Given a DSA guid string, consule all sites looking
502 for the corresponding DSA and return it.
504 for site in self.site_table.values():
505 dsa = site.get_dsa_by_guidstr(guidstr)
506 if dsa is not None:
507 return dsa
508 return None
510 def get_dsa(self, dnstr):
511 """Given a DSA dn string, consule all sites looking
512 for the corresponding DSA and return it.
514 for site in self.site_table.values():
515 dsa = site.get_dsa(dnstr)
516 if dsa is not None:
517 return dsa
518 return None
520 def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
521 """Update t_repsFrom if necessary to satisfy requirements. Such
522 updates are typically required when the IDL_DRSGetNCChanges
523 server has moved from one site to another--for example, to
524 enable compression when the server is moved from the
525 client's site to another site.
527 :param n_rep: NC replica we need
528 :param t_repsFrom: repsFrom tuple to modify
529 :param s_rep: NC replica at source DSA
530 :param s_dsa: source DSA
531 :param cn_conn: Local DSA NTDSConnection child
533 ::returns: (update) bit field containing which portion of the
534 repsFrom was modified. This bit field is suitable as input
535 to IDL_DRSReplicaModify ulModifyFields element, as it consists
536 of these bits:
537 drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
538 drsuapi.DRSUAPI_DRS_UPDATE_FLAGS
539 drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS
540 """
541 s_dnstr = s_dsa.dsa_dnstr
542 update = 0x0
544 if self.my_site.same_site(s_dsa):
545 same_site = True
546 else:
547 same_site = False
549 times = cn_conn.convert_schedule_to_repltimes()
551 # if schedule doesn't match then update and modify
552 if times != t_repsFrom.schedule:
553 t_repsFrom.schedule = times
555 # Bit DRS_PER_SYNC is set in replicaFlags if and only
556 # if nTDSConnection schedule has a value v that specifies
557 # scheduled replication is to be performed at least once
558 # per week.
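# The flag updates below all follow the same guarded test-and-set shape,
# sketched roughly here (SOME_FLAG is a placeholder, not a real constant):
#     if (t_repsFrom.replica_flags & drsuapi.SOME_FLAG) == 0x0:
#         t_repsFrom.replica_flags |= drsuapi.SOME_FLAG
# so a bit is only written (and the tuple only marked modified) when it
# is not already set.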
559 if cn_conn.is_schedule_minimum_once_per_week():
561 if (t_repsFrom.replica_flags &
562 drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0:
563 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC
565 # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
566 # if the source DSA and the local DC's nTDSDSA object are
567 # in the same site or source dsa is the FSMO role owner
568 # of one or more FSMO roles in the NC replica.
569 if same_site or n_rep.is_fsmo_role_owner(s_dnstr):
571 if (t_repsFrom.replica_flags &
572 drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0:
573 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC
575 # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
576 # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
577 # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
578 # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
579 # t.replicaFlags if and only if s and the local DC's
580 # nTDSDSA object are in different sites.
581 if (cn_conn.options & dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0:
583 if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:
585 if (t_repsFrom.replica_flags &
586 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0:
587 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
589 elif not same_site:
591 if (t_repsFrom.replica_flags &
592 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0:
593 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
595 # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
596 # and only if s and the local DC's nTDSDSA object are
597 # not in the same site and the
598 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
599 # clear in cn!options
600 if (not same_site and
601 (cn_conn.options &
602 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):
604 if (t_repsFrom.replica_flags &
605 drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0:
606 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION
608 # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
609 # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
610 if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:
612 if (t_repsFrom.replica_flags &
613 drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0:
614 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC
616 # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
617 # set in t.replicaFlags if and only if cn!enabledConnection = false.
618 if not cn_conn.is_enabled():
620 if (t_repsFrom.replica_flags &
621 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0:
622 t_repsFrom.replica_flags |= \
623 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC
625 if (t_repsFrom.replica_flags &
626 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0:
627 t_repsFrom.replica_flags |= \
628 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC
630 # If s and the local DC's nTDSDSA object are in the same site,
631 # cn!transportType has no value, or the RDN of cn!transportType
632 # is CN=IP:
634 # Bit DRS_MAIL_REP in t.replicaFlags is clear.
636 # t.uuidTransport = NULL GUID.
638 # t.uuidDsa = The GUID-based DNS name of s.
640 # Otherwise:
642 # Bit DRS_MAIL_REP in t.replicaFlags is set.
644 # If x is the object with dsname cn!transportType,
645 # t.uuidTransport = x!objectGUID.
647 # Let a be the attribute identified by
648 # x!transportAddressAttribute. If a is
649 # the dNSHostName attribute, t.uuidDsa = the GUID-based
650 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
652 # It appears that the first statement i.e.
654 # "If s and the local DC's nTDSDSA object are in the same
655 # site, cn!transportType has no value, or the RDN of
656 # cn!transportType is CN=IP:"
658 # could be a slightly tighter statement if it had an "or"
659 # between each condition. I believe this should
660 # be interpreted as:
662 # IF (same-site) OR (no-value) OR (type-ip)
664 # because IP should be the primary transport mechanism
665 # (even in inter-site) and the absence of the transportType
666 # attribute should always imply IP no matter if it's multi-site
668 # NOTE MS-TECH INCORRECT:
670 # All indications point to these statements above being
671 # incorrectly stated:
673 # t.uuidDsa = The GUID-based DNS name of s.
675 # Let a be the attribute identified by
676 # x!transportAddressAttribute. If a is
677 # the dNSHostName attribute, t.uuidDsa = the GUID-based
678 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
680 # because the uuidDSA is a GUID and not a GUID-based DNS
681 # name. Nor can uuidDsa hold (s!parent)!a if not
682 # dNSHostName. What should have been said is:
684 # t.naDsa = The GUID-based DNS name of s
686 # That would also be correct if transportAddressAttribute
687 # were "mailAddress" because (naDsa) can also correctly
688 # hold the SMTP ISM service address.
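# For illustration only (values invented): with a dsa_guid of
# 4a6bd92a-7132-4a28-9b87-3fe4a9f14ffd and a forest DNS name of
# example.com, nastr below becomes
# "4a6bd92a-7132-4a28-9b87-3fe4a9f14ffd._msdcs.example.com",
# i.e. the GUID-based DNS name of s.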
690 nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())
692 # We're not currently supporting SMTP replication
693 # so is_smtp_replication_available() is currently
694 # always returning False
695 if (same_site or
696 cn_conn.transport_dnstr is None or
697 cn_conn.transport_dnstr.find("CN=IP") == 0 or
698 not is_smtp_replication_available()):
700 if (t_repsFrom.replica_flags &
701 drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0:
702 t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP
704 null_guid = misc.GUID()
705 if (t_repsFrom.transport_guid is None or
706 t_repsFrom.transport_guid != null_guid):
707 t_repsFrom.transport_guid = null_guid
709 # See (NOTE MS-TECH INCORRECT) above
710 if t_repsFrom.version == 0x1:
711 if t_repsFrom.dns_name1 is None or \
712 t_repsFrom.dns_name1 != nastr:
713 t_repsFrom.dns_name1 = nastr
714 else:
715 if t_repsFrom.dns_name1 is None or \
716 t_repsFrom.dns_name2 is None or \
717 t_repsFrom.dns_name1 != nastr or \
718 t_repsFrom.dns_name2 != nastr:
719 t_repsFrom.dns_name1 = nastr
720 t_repsFrom.dns_name2 = nastr
722 else:
723 if (t_repsFrom.replica_flags &
724 drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0:
725 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP
727 # We have a transport type but it's not an
728 # object in the database
729 if str(cn_conn.transport_guid) not in self.transport_table.keys():
730 raise Exception("Missing inter-site transport - (%s)" %
731 cn_conn.transport_dnstr)
733 x_transport = self.transport_table[str(cn_conn.transport_guid)]
735 if t_repsFrom.transport_guid != x_transport.guid:
736 t_repsFrom.transport_guid = x_transport.guid
738 # See (NOTE MS-TECH INCORRECT) above
739 if x_transport.address_attr == "dNSHostName":
741 if t_repsFrom.version == 0x1:
742 if t_repsFrom.dns_name1 is None or \
743 t_repsFrom.dns_name1 != nastr:
744 t_repsFrom.dns_name1 = nastr
745 else:
746 if t_repsFrom.dns_name1 is None or \
747 t_repsFrom.dns_name2 is None or \
748 t_repsFrom.dns_name1 != nastr or \
749 t_repsFrom.dns_name2 != nastr:
750 t_repsFrom.dns_name1 = nastr
751 t_repsFrom.dns_name2 = nastr
753 else:
754 # MS tech specification says we retrieve the named
755 # attribute in "transportAddressAttribute" from the parent of
756 # the DSA object
757 try:
758 pdnstr = s_dsa.get_parent_dnstr()
759 attrs = [ x_transport.address_attr ]
761 res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
762 attrs=attrs)
763 except ldb.LdbError, (enum, estr):
764 raise Exception(
765 "Unable to find attr (%s) for (%s) - (%s)" %
766 (x_transport.address_attr, pdnstr, estr))
768 msg = res[0]
769 nastr = str(msg[x_transport.address_attr][0])
771 # See (NOTE MS-TECH INCORRECT) above
772 if t_repsFrom.version == 0x1:
773 if t_repsFrom.dns_name1 is None or \
774 t_repsFrom.dns_name1 != nastr:
775 t_repsFrom.dns_name1 = nastr
776 else:
777 if t_repsFrom.dns_name1 is None or \
778 t_repsFrom.dns_name2 is None or \
779 t_repsFrom.dns_name1 != nastr or \
780 t_repsFrom.dns_name2 != nastr:
782 t_repsFrom.dns_name1 = nastr
783 t_repsFrom.dns_name2 = nastr
785 if t_repsFrom.is_modified():
786 logger.debug("modify_repsFrom(): %s" % t_repsFrom)
788 def is_repsFrom_implied(self, n_rep, cn_conn):
789 """Given a NC replica and NTDS Connection, determine if the connection
790 implies a repsFrom tuple should be present from the source DSA listed
791 in the connection to the naming context
793 :param n_rep: NC replica
794 :param conn: NTDS Connection
795 ::returns (True or False), source DSA:
796 """
797 # NTDS Connection must satisfy all the following criteria
798 # to imply a repsFrom tuple is needed:
800 # cn!enabledConnection = true.
801 # cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
802 # cn!fromServer references an nTDSDSA object.
803 s_dsa = None
805 if cn_conn.is_enabled() and not cn_conn.is_rodc_topology():
807 s_dnstr = cn_conn.get_from_dnstr()
808 if s_dnstr is not None:
809 s_dsa = self.get_dsa(s_dnstr)
811 # No DSA matching this source DN string?
812 if s_dsa is None:
813 return False, None
815 # To imply a repsFrom tuple is needed, each of these
816 # must be True:
818 # An NC replica of the NC "is present" on the DC to
819 # which the nTDSDSA object referenced by cn!fromServer
820 # corresponds.
822 # An NC replica of the NC "should be present" on
823 # the local DC
824 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
826 if s_rep is None or not s_rep.is_present():
827 return False, None
829 # To imply a repsFrom tuple is needed, each of these
830 # must be True:
832 # The NC replica on the DC referenced by cn!fromServer is
833 # a writable replica or the NC replica that "should be
834 # present" on the local DC is a partial replica.
836 # The NC is not a domain NC, the NC replica that
837 # "should be present" on the local DC is a partial
838 # replica, cn!transportType has no value, or
839 # cn!transportType has an RDN of CN=IP.
841 implied = (not s_rep.is_ro() or n_rep.is_partial()) and \
842 (not n_rep.is_domain() or
843 n_rep.is_partial() or
844 cn_conn.transport_dnstr is None or
845 cn_conn.transport_dnstr.find("CN=IP") == 0)
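# Loosely worked example of the test above: a writable source replica
# satisfies the first clause outright, and an IP (or absent) transport
# satisfies the second clause for a domain NC, so such a connection
# implies a repsFrom tuple; a partial local replica satisfies both
# clauses regardless of transport.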
847 if implied:
848 return True, s_dsa
849 else:
850 return False, None
852 def translate_ntdsconn(self):
853 """This function adjusts values of repsFrom abstract attributes of NC
854 replicas on the local DC to match those implied by
855 nTDSConnection objects.
856 [MS-ADTS] 6.2.2.5
857 """
858 if self.my_dsa.is_translate_ntdsconn_disabled():
859 logger.debug("skipping translate_ntdsconn() because disabling flag is set")
860 return
862 logger.debug("translate_ntdsconn(): enter")
864 current_rep_table, needed_rep_table = self.my_dsa.get_rep_tables()
866 # Filled in with replicas we currently have that need deleting
867 delete_reps = set()
869 # We're using the MS notation names here to allow
870 # correlation back to the published algorithm.
872 # n_rep - NC replica (n)
873 # t_repsFrom - tuple (t) in n!repsFrom
874 # s_dsa - Source DSA of the replica. Defined as nTDSDSA
875 # object (s) such that (s!objectGUID = t.uuidDsa)
876 # In our IDL representation of repsFrom the (uuidDsa)
877 # attribute is called (source_dsa_obj_guid)
878 # cn_conn - (cn) is nTDSConnection object and child of the local DC's
879 # nTDSDSA object and (cn!fromServer = s)
880 # s_rep - source DSA replica of n
882 # If we have the replica and it's not needed
883 # then we add it to the "to be deleted" list.
884 for dnstr in current_rep_table:
885 if dnstr not in needed_rep_table:
886 delete_reps.add(dnstr)
888 if delete_reps:
889 DEBUG('current %d needed %d delete %d', len(current_rep_table),
890 len(needed_rep_table), len(delete_reps))
891 DEBUG('deleting these reps: %s', delete_reps)
892 for dnstr in delete_reps:
893 del current_rep_table[dnstr]
895 # Now perform the scan of replicas we'll need
896 # and compare any current repsFrom against the
897 # connections
898 for dnstr, n_rep in needed_rep_table.items():
900 # load any repsFrom and fsmo roles as we'll
901 # need them during connection translation
902 n_rep.load_repsFrom(self.samdb)
903 n_rep.load_fsmo_roles(self.samdb)
905 # Loop thru the existing repsFrom tuples (if any)
906 for i, t_repsFrom in enumerate(n_rep.rep_repsFrom):
908 # for each tuple t in n!repsFrom, let s be the nTDSDSA
909 # object such that s!objectGUID = t.uuidDsa
910 guidstr = str(t_repsFrom.source_dsa_obj_guid)
911 s_dsa = self.get_dsa_by_guidstr(guidstr)
913 # Source dsa is gone from config (strange)
914 # so cleanup stale repsFrom for unlisted DSA
915 if s_dsa is None:
916 logger.debug("repsFrom source DSA guid (%s) not found" %
917 guidstr)
918 t_repsFrom.to_be_deleted = True
919 continue
921 s_dnstr = s_dsa.dsa_dnstr
923 # Retrieve my DSAs connection object (if it exists)
924 # that specifies the fromServer equivalent to
925 # the DSA that is specified in the repsFrom source
926 cn_conn = self.my_dsa.get_connection_by_from_dnstr(s_dnstr)
928 # Let (cn) be the nTDSConnection object such that (cn)
929 # is a child of the local DC's nTDSDSA object and
930 # (cn!fromServer = s) and (cn!options) does not contain
931 # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.
932 if cn_conn and cn_conn.is_rodc_topology():
933 cn_conn = None
935 # KCC removes this repsFrom tuple if any of the following
936 # is true:
937 # cn = NULL.
939 # No NC replica of the NC "is present" on DSA that
940 # would be source of replica
942 # A writable replica of the NC "should be present" on
943 # the local DC, but a partial replica "is present" on
944 # the source DSA
945 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
947 if cn_conn is None or \
948 s_rep is None or not s_rep.is_present() or \
949 (not n_rep.is_ro() and s_rep.is_partial()):
951 t_repsFrom.to_be_deleted = True
952 continue
954 # If the KCC did not remove t from n!repsFrom, it updates t
955 self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
957 # Loop thru connections and add implied repsFrom tuples
958 # for each NTDSConnection under our local DSA if the
959 # repsFrom is not already present
960 for cn_dnstr, cn_conn in self.my_dsa.connect_table.items():
962 implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn)
963 if not implied:
964 continue
966 # Loop thru the existing repsFrom tuples (if any) and
967 # if we already have a tuple for this connection then
968 # no need to proceed to add. It will have been changed
969 # to have the correct attributes above
970 for i, t_repsFrom in enumerate(n_rep.rep_repsFrom):
972 guidstr = str(t_repsFrom.source_dsa_obj_guid)
973 if s_dsa is self.get_dsa_by_guidstr(guidstr):
974 s_dsa = None
975 break
977 if s_dsa is None:
978 continue
980 # Create a new RepsFromTo and proceed to modify
981 # it according to specification
982 t_repsFrom = RepsFromTo(n_rep.nc_dnstr)
984 t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid
986 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
988 self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
990 # Add to our NC repsFrom as this is newly computed
991 if t_repsFrom.is_modified():
992 n_rep.rep_repsFrom.append(t_repsFrom)
994 if opts.readonly:
995 # Display any to be deleted or modified repsFrom
996 text = n_rep.dumpstr_to_be_deleted()
997 if text:
998 logger.info("TO BE DELETED:\n%s" % text)
999 text = n_rep.dumpstr_to_be_modified()
1000 if text:
1001 logger.info("TO BE MODIFIED:\n%s" % text)
1003 # Perform deletion from our tables but perform
1004 # no database modification
1005 n_rep.commit_repsFrom(self.samdb, ro=True)
1006 else:
1007 # Commit any modified repsFrom to the NC replica
1008 n_rep.commit_repsFrom(self.samdb)
1010 def keep_connection(self, cn_conn):
1011 """Determines if the connection is meant to be kept during the
1012 pruning of unneeded connections operation.
1014 Consults the keep_connection_list[] which was built during
1015 intersite NC replica graph computation.
1017 ::returns (True or False): if (True) connection should not be pruned
1018 """
1019 if cn_conn in self.keep_connection_list:
1020 return True
1021 return False
1023 def merge_failed_links(self):
1024 """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.
1025 The KCC on a writable DC attempts to merge the link and connection
1026 failure information from bridgehead DCs in its own site to help it
1027 identify failed bridgehead DCs.
1028 """
1029 # MS-TECH Ref 6.2.2.3.2 Merge of kCCFailedLinks and kCCFailedConnections
1030 # from Bridgeheads
1032 # 1. Queries every bridgehead server in your site (other than yourself)
1033 # 2. For every ntDSConnection that references a server in a different
1034 # site merge all the failure info
1036 # XXX - not implemented yet
1038 def setup_graph(self, part):
1039 """Set up a GRAPH, populated with a VERTEX for each site
1040 object, a MULTIEDGE for each siteLink object, and a
1041 # MULTIEDGESET for each siteLinkBridge object (or implied
1042 siteLinkBridge).
1044 ::returns: a new graph
1046 guid_to_vertex = {}
1047 # Create graph
1048 g = IntersiteGraph()
1049 # Add vertices
1050 for site_guid, site in self.site_table.items():
1051 vertex = Vertex(site, part)
1052 vertex.guid = site_guid
1053 vertex.ndrpacked_guid = ndr_pack(site.site_guid)
1054 g.vertices.add(vertex)
1056 if not guid_to_vertex.get(site_guid):
1057 guid_to_vertex[site_guid] = []
1059 guid_to_vertex[site_guid].append(vertex)
1061 connected_vertices = set()
1062 for transport_guid, transport in self.transport_table.items():
1063 # Currently only ever "IP"
1064 for site_link_dn, site_link in self.sitelink_table.items():
1065 new_edge = create_edge(transport_guid, site_link, guid_to_vertex)
1066 connected_vertices.update(new_edge.vertices)
1067 g.edges.add(new_edge)
1069 # If 'Bridge all site links' is enabled and Win2k3 bridges required is not set
1070 # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
1071 # There is no documentation for this; however, ntdsapi.h appears to list:
1072 # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
1073 if ((self.my_site.site_options & 0x00000002) == 0
1074 and (self.my_site.site_options & 0x00001000) == 0):
1075 g.edge_set.add(create_auto_edge_set(g, transport_guid))
1076 else:
1077 # TODO get all site link bridges
1078 for site_link_bridge in []:
1079 g.edge_set.add(create_edge_set(g, transport_guid,
1080 site_link_bridge))
1082 g.connected_vertices = connected_vertices
1084 return g
1086 def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
1087 """Get a bridghead DC.
1089 :param site: site object representing for which a bridgehead
1090 DC is desired.
1091 :param part: crossRef for NC to replicate.
1092 :param transport: interSiteTransport object for replication
1093 traffic.
1094 :param partial_ok: True if a DC containing a partial
1095 replica or a full replica will suffice, False if only
1096 a full replica will suffice.
1097 :param detect_failed: True to detect failed DCs and route
1098 replication traffic around them, False to assume no DC
1099 has failed.
1100 ::returns: dsa object for the bridgehead DC or None
1101 """
1103 bhs = self.get_all_bridgeheads(site, part, transport,
1104 partial_ok, detect_failed)
1105 if len(bhs) == 0:
1106 logger.debug("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" %
1107 site.site_dnstr)
1108 return None
1109 else:
1110 logger.debug("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" %
1111 (site.site_dnstr, bhs[0].dsa_dnstr))
1112 return bhs[0]
1114 def get_all_bridgeheads(self, site, part, transport,
1115 partial_ok, detect_failed):
1116 """Get all bridghead DCs satisfying the given criteria
1118 :param site: site object representing the site for which
1119 bridgehead DCs are desired.
1120 :param part: partition for NC to replicate.
1121 :param transport: interSiteTransport object for
1122 replication traffic.
1123 :param partial_ok: True if a DC containing a partial
1124 replica or a full replica will suffice, False if
1125 only a full replica will suffice.
1126 :param detect_failed: True to detect failed DCs and route
1127 replication traffic around them, FALSE to assume
1128 no DC has failed.
1129 ::returns: list of dsa objects for available bridgehead
1130 DCs or None
1131 """
1133 bhs = []
1135 logger.debug("get_all_bridgeheads: %s" % transport)
1137 logger.debug(site.dsa_table)
1138 for key, dsa in site.dsa_table.items():
1140 pdnstr = dsa.get_parent_dnstr()
1142 # IF t!bridgeheadServerListBL has one or more values and
1143 # t!bridgeheadServerListBL does not contain a reference
1144 # to the parent object of dc then skip dc
1145 if (len(transport.bridgehead_list) != 0 and
1146 pdnstr not in transport.bridgehead_list):
1147 continue
1149 # IF dc is in the same site as the local DC
1150 # IF a replica of cr!nCName is not in the set of NC replicas
1151 # that "should be present" on dc or a partial replica of the
1152 # NC "should be present" but partialReplicasOkay = FALSE
1153 # Skip dc
1154 if self.my_site.same_site(dsa):
1155 needed, ro, partial = part.should_be_present(dsa)
1156 if not needed or (partial and not partial_ok):
1157 continue
1159 # ELSE
1160 # IF an NC replica of cr!nCName is not in the set of NC
1161 # replicas that "are present" on dc or a partial replica of
1162 # the NC "is present" but partialReplicasOkay = FALSE
1163 # Skip dc
1164 else:
1165 rep = dsa.get_current_replica(part.nc_dnstr)
1166 if rep is None or (rep.is_partial() and not partial_ok):
1167 continue
1169 # IF AmIRODC() and cr!nCName corresponds to default NC then
1170 # Let dsaobj be the nTDSDSA object of the dc
1171 # IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
1172 # Skip dc
1173 if self.my_dsa.is_ro() and part.is_default():
1174 if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
1175 continue
1177 # IF t!name != "IP" and the parent object of dc has no value for
1178 # the attribute specified by t!transportAddressAttribute
1179 # Skip dc
1180 if transport.name != "IP":
1181 # MS tech specification says we retrieve the named
1182 # attribute in "transportAddressAttribute" from the parent
1183 # of the DSA object
1184 try:
1185 attrs = [ transport.address_attr ]
1187 res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
1188 attrs=attrs)
1189 except ldb.LdbError, (enum, estr):
1190 continue
1192 msg = res[0]
1193 if transport.address_attr not in msg:
1194 continue
1196 nastr = str(msg[transport.address_attr][0])
1198 # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
1199 # Skip dc
1200 if self.is_bridgehead_failed(dsa, detect_failed):
1201 continue
1203 logger.debug("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr)
1204 bhs.append(dsa)
1206 # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
1207 # s!options
1208 # SORT bhs such that all GC servers precede DCs that are not GC
1209 # servers, and otherwise by ascending objectGUID
1210 # ELSE
1211 # SORT bhs in a random order
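# Sketch of the deterministic ordering used when random selection is
# disabled (servers invented): a GC bridgehead sorts ahead of a non-GC
# bridgehead, and two GCs are ordered by ascending objectGUID, so
# get_bridgehead() picks the same DC on every run.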
1212 if site.is_random_bridgehead_disabled():
1213 bhs.sort(sort_dsa_by_gc_and_guid)
1214 else:
1215 random.shuffle(bhs)
1217 return bhs
1220 def is_bridgehead_failed(self, dsa, detect_failed):
1221 """Determine whether a given DC is known to be in a failed state
1222 ::returns: True if and only if the DC should be considered failed
1223 """
1224 # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
1225 # When DETECT_STALE_DISABLED, we can never know if it's in a failed state
1226 if self.my_site.site_options & 0x00000008:
1227 return False
1228 elif self.is_stale_link_connection(dsa):
1229 return True
1231 return detect_failed # TODO WHY?
1233 def create_connection(self, part, rbh, rsite, transport,
1234 lbh, lsite, link_opt, link_sched,
1235 partial_ok, detect_failed):
1236 """Create an nTDSConnection object with the given parameters
1237 if one does not already exist.
1239 :param part: crossRef object for the NC to replicate.
1240 :param rbh: nTDSDSA object for DC to act as the
1241 IDL_DRSGetNCChanges server (which is in a site other
1242 than the local DC's site).
1243 :param rsite: site of the rbh
1244 :param transport: interSiteTransport object for the transport
1245 to use for replication traffic.
1246 :param lbh: nTDSDSA object for DC to act as the
1247 IDL_DRSGetNCChanges client (which is in the local DC's site).
1248 :param lsite: site of the lbh
1249 :param link_opt: Replication parameters (aggregated siteLink options, etc.)
1250 :param link_sched: Schedule specifying the times at which
1251 to begin replicating.
1252 :param partial_ok: True if bridgehead DCs containing partial
1253 replicas of the NC are acceptable.
1254 :param detect_failed: True to detect failed DCs and route
1255 replication traffic around them, FALSE to assume no DC
1256 has failed.
1257 """
1258 rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
1259 partial_ok, False)
1261 # MS-TECH says to compute rbhs_avail but then doesn't use it
1262 # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
1263 # partial_ok, detect_failed)
1265 lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
1266 partial_ok, False)
1268 # MS-TECH says to compute lbhs_avail but then doesn't use it
1269 # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
1270 # partial_ok, detect_failed)
1272 # FOR each nTDSConnection object cn such that the parent of cn is
1273 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1274 for ldsa in lbhs_all:
1275 for cn in ldsa.connect_table.values():
1277 rdsa = None
1278 for rdsa in rbhs_all:
1279 if cn.from_dnstr == rdsa.dsa_dnstr:
1280 break
1282 if rdsa is None:
1283 continue
1285 # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
1286 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
1287 # cn!transportType references t
1288 if (cn.is_generated() and not cn.is_rodc_topology() and
1289 cn.transport_guid == transport.guid):
1291 # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
1292 # cn!options and cn!schedule != sch
1293 # Perform an originating update to set cn!schedule to
1294 # sched
1295 if (not cn.is_user_owned_schedule() and
1296 not cn.is_equivalent_schedule(link_sched)):
1297 cn.schedule = link_sched
1298 cn.set_modified(True)
1300 # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1301 # NTDSCONN_OPT_USE_NOTIFY are set in cn
1302 if cn.is_override_notify_default() and \
1303 cn.is_use_notify():
1305 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
1306 # ri.Options
1307 # Perform an originating update to clear bits
1308 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1309 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1310 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
1311 cn.options &= \
1312 ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1313 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1314 cn.set_modified(True)
1316 # ELSE
1317 else:
1319 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
1320 # ri.Options
1321 # Perform an originating update to set bits
1322 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1323 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1324 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1325 cn.options |= \
1326 (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1327 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1328 cn.set_modified(True)
1331 # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
1332 if cn.is_twoway_sync():
1334 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
1335 # ri.Options
1336 # Perform an originating update to clear bit
1337 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1338 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
1339 cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1340 cn.set_modified(True)
1342 # ELSE
1343 else:
1345 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
1346 # ri.Options
1347 # Perform an originating update to set bit
1348 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1349 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1350 cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1351 cn.set_modified(True)
1354 # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
1355 # in cn!options
1356 if cn.is_intersite_compression_disabled():
1358 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
1359 # in ri.Options
1360 # Perform an originating update to clear bit
1361 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1362 # cn!options
1363 if (link_opt &
1364 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0:
1365 cn.options &= \
1366 ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1367 cn.set_modified(True)
1369 # ELSE
1370 else:
1371 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1372 # ri.Options
1373 # Perform an originating update to set bit
1374 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1375 # cn!options
1376 if (link_opt &
1377 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0:
1378 cn.options |= \
1379 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1380 cn.set_modified(True)
1382 # Display any modified connection
1383 if opts.readonly:
1384 if cn.to_be_modified:
1385 logger.info("TO BE MODIFIED:\n%s" % cn)
1387 ldsa.commit_connections(self.samdb, ro=True)
1388 else:
1389 ldsa.commit_connections(self.samdb)
1390 # ENDFOR
1392 valid_connections = 0
1394 # FOR each nTDSConnection object cn such that cn!parent is
1395 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1396 for ldsa in lbhs_all:
1397 for cn in ldsa.connect_table.values():
1399 rdsa = None
1400 for rdsa in rbhs_all:
1401 if cn.from_dnstr == rdsa.dsa_dnstr:
1402 break
1404 if rdsa is None:
1405 continue
1407 # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
1408 # cn!transportType references t) and
1409 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
1410 if ((not cn.is_generated() or
1411 cn.transport_guid == transport.guid) and
1412 not cn.is_rodc_topology()):
1414 # LET rguid be the objectGUID of the nTDSDSA object
1415 # referenced by cn!fromServer
1416 # LET lguid be (cn!parent)!objectGUID
1418 # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
1419 # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
1420 # Increment cValidConnections by 1
1421 if (not self.is_bridgehead_failed(rdsa, detect_failed) and
1422 not self.is_bridgehead_failed(ldsa, detect_failed)):
1423 valid_connections += 1
1425 # IF keepConnections does not contain cn!objectGUID
1426 # APPEND cn!objectGUID to keepConnections
1427 if not self.keep_connection(cn):
1428 self.keep_connection_list.append(cn)
1430 # ENDFOR
1432 # IF cValidConnections = 0
1433 if valid_connections == 0:
1435 # LET opt be NTDSCONN_OPT_IS_GENERATED
1436 opt = dsdb.NTDSCONN_OPT_IS_GENERATED
1438 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
1439 # SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1440 # NTDSCONN_OPT_USE_NOTIFY in opt
1441 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1442 opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1443 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1445 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
1446 # SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
1447 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1448 opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1450 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1451 # ri.Options
1452 # SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
1453 if (link_opt &
1454 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0:
1455 opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1457 # Perform an originating update to create a new nTDSConnection
1458 # object cn that is a child of lbh, cn!enabledConnection = TRUE,
1459 # cn!options = opt, cn!transportType is a reference to t,
1460 # cn!fromServer is a reference to rbh, and cn!schedule = sch
1461 cn = lbh.new_connection(opt, 0, transport, rbh.dsa_dnstr, link_sched)
1463 # Display any added connection
1464 if opts.readonly:
1465 if cn.to_be_added:
1466 logger.info("TO BE ADDED:\n%s" % cn)
1468 lbh.commit_connections(self.samdb, ro=True)
1469 else:
1470 lbh.commit_connections(self.samdb)
1472 # APPEND cn!objectGUID to keepConnections
1473 if not self.keep_connection(cn):
1474 self.keep_connection_list.append(cn)
1476 def add_transports(self, vertex, local_vertex, graph, detect_failed):
1478 # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
1479 # here, but using vertex seems to make more sense.
1480 # That is, it wants this:
1482 #bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1483 # local_vertex.is_black(), detect_failed)
1485 # TODO WHY?????
1487 vertex.accept_red_red = []
1488 vertex.accept_black = []
1489 found_failed = False
1490 for t_guid, transport in self.transport_table.items():
1491 if transport.name != 'IP':
1492 continue
1493 DEBUG_BLUE(t_guid)
1494 # FLAG_CR_NTDS_DOMAIN 0x00000002
1495 if (vertex.is_red() and transport.name != "IP" and
1496 vertex.part.system_flags & 0x00000002):
1497 continue
1499 if vertex not in graph.connected_vertices:
1500 continue
1502 partial_replica_okay = vertex.is_black()
1503 bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1504 partial_replica_okay, detect_failed)
1505 if bh is None:
1506 found_failed = True
1507 continue
1509 vertex.accept_red_red.append(t_guid)
1510 vertex.accept_black.append(t_guid)
1512 # Add additional transport to allow another run of Dijkstra
1513 vertex.accept_red_red.append("EDGE_TYPE_ALL")
1514 vertex.accept_black.append("EDGE_TYPE_ALL")
1516 return found_failed
1518 def create_connections(self, graph, part, detect_failed):
1519 """Construct an NC replica graph for the NC identified by
1520 the given crossRef, then create any additional nTDSConnection
1521 objects required.
1523 :param graph: site graph.
1524 :param part: crossRef object for NC.
1525 :param detect_failed: True to detect failed DCs and route
1526 replication traffic around them, False to assume no DC
1527 has failed.
1529 Modifies self.keep_connection_list by adding any connections
1530 deemed to be "in use".
1532 ::returns: (all_connected, found_failed_dc)
1533 (all_connected) True if the resulting NC replica graph
1534 connects all sites that need to be connected.
1535 (found_failed_dc) True if one or more failed DCs were
1536 detected.
1537 """
1538 all_connected = True
1539 found_failed = False
1541 logger.debug("create_connections(): enter\n\tpartdn=%s\n\tdetect_failed=%s" %
1542 (part.nc_dnstr, detect_failed))
1544 # XXX - This is a highly abbreviated function from the MS-TECH
1545 # ref. It creates connections between bridgeheads to all
1546 # sites that have appropriate replicas. Thus we are not
1547 # creating a minimum cost spanning tree but instead
1548 # producing a fully connected tree. This should produce
1549 # a full (albeit not optimal cost) replication topology.
1551 my_vertex = Vertex(self.my_site, part)
1552 my_vertex.color_vertex()
1554 for v in graph.vertices:
1555 v.color_vertex()
1556 if self.add_transports(v, my_vertex, graph, False):
1557 found_failed = True
1559 # No NC replicas for this NC in the site of the local DC,
1560 # so no nTDSConnection objects need be created
1561 if my_vertex.is_white():
1562 return all_connected, found_failed
1564 edge_list, component_count = self.get_spanning_tree_edges(graph, label=part.partstr)
1566 logger.debug("%s Number of components: %d" % (part.nc_dnstr, component_count))
1567 if component_count > 1:
1568 all_connected = False
1570 # LET partialReplicaOkay be TRUE if and only if
1571 # localSiteVertex.Color = COLOR.BLACK
1572 if my_vertex.is_black():
1573 partial_ok = True
1574 else:
1575 partial_ok = False
1577 # Utilize the IP transport only for now
1578 transport = None
1579 for transport in self.transport_table.values():
1580 if transport.name == "IP":
1581 break
1583 if transport is None:
1584 raise Exception("Unable to find inter-site transport for IP")
1586 for e in edge_list:
1587 if e.directed and e.vertices[0].site is self.my_site: # more accurate comparison?
1588 continue
1590 if e.vertices[0].site is self.my_site:
1591 rsite = e.vertices[1].site
1592 else:
1593 rsite = e.vertices[0].site
1595 # We don't make connections to our own site as that
1596 # is the intra-site topology generator's job
1597 if rsite is self.my_site:
1598 continue
1600 # Determine bridgehead server in remote site
1601 rbh = self.get_bridgehead(rsite, part, transport,
1602 partial_ok, detect_failed)
1604 # RODC acts as a BH for itself
1605 # IF AmIRODC() then
1606 # LET lbh be the nTDSDSA object of the local DC
1607 # ELSE
1608 # LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
1609 # cr, t, partialReplicaOkay, detectFailedDCs)
1610 if self.my_dsa.is_ro():
1611 lsite = self.my_site
1612 lbh = self.my_dsa
1613 else:
1614 lsite = self.my_site
1615 lbh = self.get_bridgehead(lsite, part, transport,
1616 partial_ok, detect_failed)
1617 # TODO
1618 if lbh is None:
1619 return False, True
1621 sitelink = e.site_link
1622 if sitelink is None:
1623 link_opt = 0x0
1624 link_sched = None
1625 else:
1626 link_opt = sitelink.options
1627 link_sched = sitelink.schedule
1629 self.create_connection(part, rbh, rsite, transport,
1630 lbh, lsite, link_opt, link_sched,
1631 partial_ok, detect_failed)
1633 return all_connected, found_failed
1635 def create_intersite_connections(self):
1636 """Computes an NC replica graph for each NC replica that "should be
1637 present" on the local DC or "is present" on any DC in the same site
1638 as the local DC. For each edge directed to an NC replica on such a
1639 DC from an NC replica on a DC in another site, the KCC creates an
1640 nTDSConnection object to imply that edge if one does not already
1641 exist.
1643 Modifies self.keep_connection_list - A list of nTDSConnection
1644 objects for edges that are directed
1645 to the local DC's site in one or more NC replica graphs.
1647 returns: True if spanning trees were created for all NC replica
1648 graphs, otherwise False.
1649 """
1650 all_connected = True
1651 self.keep_connection_list = []
1653 # LET crossRefList be the set containing each object o of class
1654 # crossRef such that o is a child of the CN=Partitions child of the
1655 # config NC
1657 # FOR each crossRef object cr in crossRefList
1658 # IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
1659 # is clear in cr!systemFlags, skip cr.
1660 # LET g be the GRAPH return of SetupGraph()
1662 for part in self.part_table.values():
1664 if not part.is_enabled():
1665 continue
1667 if part.is_foreign():
1668 continue
1670 graph = self.setup_graph(part)
1672 # Create nTDSConnection objects, routing replication traffic
1673 # around "failed" DCs.
1674 found_failed = False
1676 connected, found_failed = self.create_connections(graph, part, True)
1678 if not connected:
1679 all_connected = False
1681 if found_failed:
1682 # One or more failed DCs preclude use of the ideal NC
1683 # replica graph. Add connections for the ideal graph.
1684 self.create_connections(graph, part, False)
1686 return all_connected
1688 def get_spanning_tree_edges(self, graph, label=None):
1689 # Phase 1: Run Dijkstra's to get a list of internal edges, which are
1690 # just the shortest-paths connecting colored vertices
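# Rough sketch of the three phases that follow (sites invented): with
# red (full replica) sites R1 and R2 and a black (partial replica) site
# B, phase 1 runs Dijkstra twice per edge set, seeding first from R1/R2
# only and then from R1/R2/B, turning the resulting shortest paths into
# candidate internal edges; phase 2 runs Kruskal over those internal
# edges to keep a minimum-cost acyclic subset; phase 3 records each
# vertex's distance to the nearest red vertex for later edge orientation.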
1692 internal_edges = set()
1694 for e_set in graph.edge_set:
1695 edgeType = None
1696 for v in graph.vertices:
1697 v.edges = []
1699 # The con_type is the same for every edge in an edge set
1700 for e in e_set.edges:
1701 edgeType = e.con_type
1702 for v in e.vertices:
1703 v.edges.append(e)
1705 if opts.verify or opts.dot_files:
1706 graph_edges = [(a.site.site_dnstr, b.site.site_dnstr)
1707 for a, b in itertools.chain(*(itertools.combinations(edge.vertices, 2)
1708 for edge in e_set.edges))]
1709 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
1710 verify_properties = ('complete', 'connected', 'multi_edge_forest', 'forest',
1711 'directed_double_ring')
1713 if opts.dot_files:
1714 write_dot_file('edgeset_%s' % (edgeType,), graph_edges, vertices=graph_nodes,
1715 label=label)
1717 if opts.verify:
1718 verify_graph('spanning tree edge set %s' % edgeType, graph_edges, vertices=graph_nodes,
1719 properties=verify_properties, debug=DEBUG)
1721 # Run dijkstra's algorithm with just the red vertices as seeds
1722 # Seed from the full replicas
1723 dijkstra(graph, edgeType, False)
1725 # Process edge set
1726 process_edge_set(graph, e_set, internal_edges)
1728 # Run dijkstra's algorithm with red and black vertices as the seeds
1729 # Seed from both full and partial replicas
1730 dijkstra(graph, edgeType, True)
1732 # Process edge set
1733 process_edge_set(graph, e_set, internal_edges)
1735 # Reset the vertices: each colored vertex has itself as root/component
1736 setup_vertices(graph)
1737 process_edge_set(graph, None, internal_edges)
1739 if opts.verify or opts.dot_files:
1740 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr) for e in internal_edges]
1741 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
1742 verify_properties = ('complete', 'connected', 'multi_edge_forest', 'forest',
1743 'directed_double_ring')
1744 verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label,
1745 properties=verify_properties, debug=DEBUG, verify=opts.verify,
1746 dot_files=opts.dot_files)
1749 # Phase 2: Run Kruskal's on the internal edges
1750 output_edges, components = kruskal(graph, internal_edges)
1752 # This recalculates the cost of the path connecting each vertex to its closest red vertex
1753 # Ignoring edge types is fine because no suboptimal edge should exist in the graph
1754 dijkstra(graph, "EDGE_TYPE_ALL", False) # TODO rename
1755 # Phase 3: Process the output
1756 for v in graph.vertices:
1757 if v.is_red():
1758 v.dist_to_red = 0
1759 else:
1760 v.dist_to_red = v.repl_info.cost
1762 if opts.verify or opts.dot_files:
1763 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr) for e in internal_edges]
1764 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
1765 verify_properties = ('complete', 'connected', 'multi_edge_forest', 'forest', 'directed_double_ring')
1766 verify_and_dot('postkruskal', graph_edges, graph_nodes, label=label,
1767 properties=verify_properties, debug=DEBUG, verify=opts.verify,
1768 dot_files=opts.dot_files)
1770 # count the components
1771 return self.copy_output_edges(graph, output_edges), components
1773 # This ensures only one-way connections for partial-replicas
1774 def copy_output_edges(self, graph, output_edges):
1775 edge_list = []
1776 vid = self.my_site # the local DC's site object
1778 for edge in output_edges:
1779 # Three-way edges are no problem here since these were created by
1780 # add_out_edge which only has two endpoints
1781 v = edge.vertices[0]
1782 w = edge.vertices[1]
1783 if v.site is vid or w.site is vid:
1784 if (v.is_black() or w.is_black()) and not v.dist_to_red == MAX_DWORD:
1785 edge.directed = True
1787 if w.dist_to_red < v.dist_to_red:
1788 edge.vertices[0] = w
1789 edge.vertices[1] = v
1791 edge_list.append(edge)
1793 return edge_list
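# A minimal stand-alone sketch (not called by the KCC) of the direction rule
# implemented in copy_output_edges() above, reduced to plain tuples.  Each
# endpoint is a hypothetical (name, dist_to_red, is_black) triple; the edge
# becomes one-way when either endpoint is a partial (black) replica, and its
# endpoints are then ordered so that the first one is nearer to a full (red)
# replica.
def _example_direct_edge(endpoint_v, endpoint_w, max_dword=2**32 - 1):
    v_name, v_dist, v_black = endpoint_v
    w_name, w_dist, w_black = endpoint_w
    directed = (v_black or w_black) and v_dist != max_dword
    if directed and w_dist < v_dist:
        # orient the edge so the endpoint closer to a red vertex comes first
        return (endpoint_w, endpoint_v), directed
    return (endpoint_v, endpoint_w), directed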
1795 def intersite(self):
1796 """The head method for generating the inter-site KCC replica
1797 connection graph and attendant nTDSConnection objects
1798 in the samdb.
1800 Produces self.keep_connection_list[] of NTDS Connections
1801 that should be kept during subsequent pruning process.
1803 ::return (True or False): (True) if the produced NC replica
1804 graph connects all sites that need to be connected
1807 # Retrieve my DSA
1808 mydsa = self.my_dsa
1809 mysite = self.my_site
1810 all_connected = True
1812 logger.debug("intersite(): enter")
1814 # Determine who is the ISTG
1815 if opts.readonly:
1816 mysite.select_istg(self.samdb, mydsa, ro=True)
1817 else:
1818 mysite.select_istg(self.samdb, mydsa, ro=False)
1820 # Test whether local site has topology disabled
1821 if mysite.is_intersite_topology_disabled():
1822 logger.debug("intersite(): exit disabled all_connected=%d" %
1823 all_connected)
1824 return all_connected
1826 if not mydsa.is_istg():
1827 logger.debug("intersite(): exit not istg all_connected=%d" %
1828 all_connected)
1829 return all_connected
1831 self.merge_failed_links()
1833 # For each NC with an NC replica that "should be present" on the
1834 # local DC or "is present" on any DC in the same site as the
1835 # local DC, the KCC constructs a site graph--a precursor to an NC
1836 # replica graph. The site connectivity for a site graph is defined
1837 # by objects of class interSiteTransport, siteLink, and
1838 # siteLinkBridge in the config NC.
1840 all_connected = self.create_intersite_connections()
1842 logger.debug("intersite(): exit all_connected=%d" % all_connected)
1843 return all_connected
1845 def update_rodc_connection(self):
1846 """Runs when the local DC is an RODC and updates the RODC NTFRS
1847 connection object.
1849 # Given an nTDSConnection object cn1, such that cn1.options contains
1850 # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2
1851 # whose options do not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure
1852 # that the following is true:
1854 # cn1.fromServer = cn2.fromServer
1855 # cn1.schedule = cn2.schedule
1857 # If no such cn2 can be found, cn1 is not modified.
1858 # If no such cn1 can be found, nothing is modified by this task.
1860 if not self.my_dsa.is_ro():
1861 return
1863 cn2 = None
1864 # Find cn2 - the DRS NTDSConnection
1865 for con in self.my_dsa.connect_table.values():
1866 if not con.is_rodc_topology():
1867 cn2 = con
1868 break
1870 # Find cn1 - the FRS NTDSConnection
1871 if cn2 is not None:
1872 for con in self.my_dsa.connect_table.values():
1873 if con.is_rodc_topology():
1874 con.from_dnstr = cn2.from_dnstr
1875 con.schedule = cn2.schedule
1876 con.to_be_modified = True
1878 # Commit changes to the database
1879 self.my_dsa.commit_connections(self.samdb, ro=opts.readonly)
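# A minimal sketch (not used by the KCC) of the copy rule implemented above,
# with hypothetical dicts standing in for nTDSConnection objects: every
# RODC_TOPOLOGY connection (cn1) mirrors the fromServer and schedule of a
# non-RODC_TOPOLOGY connection (cn2), if one exists.
def _example_update_rodc_connections(connections):
    cn2 = next((c for c in connections if not c['rodc_topology']), None)
    if cn2 is None:
        return connections      # nothing to copy from; leave cn1 untouched
    for cn1 in connections:
        if cn1['rodc_topology']:
            cn1['from_server'] = cn2['from_server']
            cn1['schedule'] = cn2['schedule']
    return connections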
1881 def intrasite_max_node_edges(self, node_count):
1882 """Returns the maximum number of edges directed to a node in
1883 the intrasite replica graph.
1885 The KCC does not create more
1886 than 50 edges directed to a single DC. To optimize replication,
1887 we compute that each node should have n+2 total edges directed
1888 to it such that (n) is the smallest non-negative integer
1889 satisfying (node_count <= 2*(n*n) + 6*n + 7)
1891 (Equivalently, if the number of edges is m (i.e. n + 2), the condition
1892 is node_count <= 2*m*m - 2*m + 3.)
1894 edges n nodecount
1895 2 0 7
1896 3 1 15
1897 4 2 27
1898 5 3 43
1900 50 48 4903
1902 :param node_count: total number of nodes in the replica graph
1904 n = 0
1905 while True:
1906 if node_count <= (2 * (n * n) + (6 * n) + 7):
1907 break
1908 n = n + 1
1909 n = n + 2
1910 if n < 50:
1911 return n
1912 return 50
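# Worked example (illustrative only, not called by the KCC): the same n+2
# bound as intrasite_max_node_edges(), written as a free function so the
# docstring table above can be checked by hand.
def _example_edge_bound(node_count):
    n = 0
    while node_count > (2 * n * n + 6 * n + 7):
        n += 1
    return min(n + 2, 50)
# e.g. _example_edge_bound(7) == 2, _example_edge_bound(15) == 3,
# _example_edge_bound(43) == 5 and _example_edge_bound(4903) == 50,
# matching the table in the docstring above.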
1914 def construct_intrasite_graph(self, site_local, dc_local,
1915 nc_x, gc_only, detect_stale):
1916 # [MS-ADTS] 6.2.2.2
1917 # We're using the MS notation names here to allow
1918 # correlation back to the published algorithm.
1920 # nc_x - naming context (x) that we are testing if it
1921 # "should be present" on the local DC
1922 # f_of_x - replica (f) found on a DC (s) for NC (x)
1923 # dc_s - DC where f_of_x replica was found
1924 # dc_local - local DC that potentially needs a replica
1925 # (f_of_x)
1926 # r_list - replica list R
1927 # p_of_x - replica (p) is partial and found on a DC (s)
1928 # for NC (x)
1929 # l_of_x - replica (l) is the local replica for NC (x)
1930 # that should appear on the local DC
1931 # r_len - the length of the replica list |R|
1933 # If the DSA doesn't need a replica for this
1934 # partition (NC x) then continue
1935 needed, ro, partial = nc_x.should_be_present(dc_local)
1937 DEBUG_YELLOW("construct_intrasite_graph(): enter" +
1938 "\n\tgc_only=%d" % gc_only +
1939 "\n\tdetect_stale=%d" % detect_stale +
1940 "\n\tneeded=%s" % needed +
1941 "\n\tro=%s" % ro +
1942 "\n\tpartial=%s" % partial +
1943 "\n%s" % nc_x)
1945 if not needed:
1946 DEBUG_RED("%s lacks 'should be present' status, aborting construct_intrasite_graph!" %
1947 nc_x.nc_dnstr)
1948 return
1950 # Create a NCReplica that matches what the local replica
1951 # should say. We'll use this below in our r_list
1952 l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid,
1953 nc_x.nc_dnstr)
1955 l_of_x.identify_by_basedn(self.samdb)
1957 l_of_x.rep_partial = partial
1958 l_of_x.rep_ro = ro
1960 # Add this replica that "should be present" to the
1961 # needed replica table for this DSA
1962 dc_local.add_needed_replica(l_of_x)
1964 # Replica list
1966 # Let R be a sequence containing each writable replica f of x
1967 # such that f "is present" on a DC s satisfying the following
1968 # criteria:
1970 # * s is a writable DC other than the local DC.
1972 # * s is in the same site as the local DC.
1974 # * If x is a read-only full replica and x is a domain NC,
1975 # then the DC's functional level is at least
1976 # DS_BEHAVIOR_WIN2008.
1978 # * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
1979 # in the options attribute of the site settings object for
1980 # the local DC's site, or no tuple z exists in the
1981 # kCCFailedLinks or kCCFailedConnections variables such
1982 # that z.UUIDDsa is the objectGUID of the nTDSDSA object
1983 # for s, z.FailureCount > 0, and the current time -
1984 # z.TimeFirstFailure > 2 hours.
1986 r_list = []
1988 # We'll loop thru all the DSAs looking for
1989 # writeable NC replicas that match the naming
1990 # context dn for (nc_x)
1992 for dc_s in self.my_site.dsa_table.values():
1994 # If this partition (nc_x) doesn't appear as a
1995 # replica (f_of_x) on (dc_s) then continue
1996 if not nc_x.nc_dnstr in dc_s.current_rep_table:
1997 continue
1999 # Pull out the NCReplica (f) of (x) with the dn
2000 # that matches NC (x) we are examining.
2001 f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2003 # Replica (f) of NC (x) must be writable
2004 if f_of_x.is_ro():
2005 continue
2007 # Replica (f) of NC (x) must satisfy the
2008 # "is present" criteria for DC (s) that
2009 # it was found on
2010 if not f_of_x.is_present():
2011 continue
2013 # DC (s) must be a writable DSA other than
2014 # my local DC. In other words we'd only replicate
2015 # from other writable DC
2016 if dc_s.is_ro() or dc_s is dc_local:
2017 continue
2019 # Certain replica graphs are produced only
2020 # for global catalogs, so test against
2021 # method input parameter
2022 if gc_only and not dc_s.is_gc():
2023 continue
2025 # DC (s) must be in the same site as the local DC
2026 # as this is the intra-site algorithm. This is
2027 # handled by virtue of placing DSAs in per
2028 # site objects (see enclosing for() loop)
2030 # If NC (x) is intended to be read-only full replica
2031 # for a domain NC on the target DC then the source
2032 # DC should have functional level at minimum WIN2008
2034 # Effectively we're saying that in order to replicate
2035 # to a target RODC (introduced in Windows Server 2008)
2036 # we have to replicate from a DC that is also at least
2037 # at that functional level.
2039 # You can also see this requirement in the MS special
2040 # considerations for RODC which state that to deploy
2041 # an RODC, at least one writable domain controller in
2042 # the domain must be running Windows Server 2008
2043 if ro and not partial and nc_x.nc_type == NCType.domain:
2044 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
2045 continue
2047 # If we haven't been told to turn off stale connection
2048 # detection and this dsa has a stale connection then
2049 # continue
2050 if detect_stale and self.is_stale_link_connection(dc_s):
2051 continue
2053 # Replica meets criteria. Add it to table indexed
2054 # by the GUID of the DC that it appears on
2055 r_list.append(f_of_x)
2057 # If a partial (not full) replica of NC (x) "should be present"
2058 # on the local DC, append to R each partial replica (p of x)
2059 # such that p "is present" on a DC satisfying the same
2060 # criteria defined above for full replica DCs.
2062 # XXX This loop and the previous one differ only in whether
2063 # the replica is partial or not. Here we only accept partial
2064 # (because we're partial); before we only accepted full. Order
2065 # doesn't matter (the list is sorted a few lines down) so these
2066 # loops could easily be merged. Or this could be a helper
2067 # function.
2069 if partial:
2071 # Now we loop thru all the DSAs looking for
2072 # partial NC replicas that match the naming
2073 # context dn for (NC x)
2074 for dc_s in self.my_site.dsa_table.values():
2076 # If this partition NC (x) doesn't appear as a
2077 # replica (p) of NC (x) on the dsa DC (s) then
2078 # continue
2079 if not nc_x.nc_dnstr in dc_s.current_rep_table:
2080 continue
2082 # Pull out the NCReplica with the dn that
2083 # matches NC (x) we are examining.
2084 p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2086 # Replica (p) of NC (x) must be partial
2087 if not p_of_x.is_partial():
2088 continue
2090 # Replica (p) of NC (x) must satisfy the
2091 # "is present" criteria for DC (s) that
2092 # it was found on
2093 if not p_of_x.is_present():
2094 continue
2096 # DC (s) must be a writable DSA other than
2097 # my DSA. In other words we'd only replicate
2098 # from other writable DSA
2099 if dc_s.is_ro() or dc_s is dc_local:
2100 continue
2102 # Certain replica graphs are produced only
2103 # for global catalogs, so test against
2104 # method input parameter
2105 if gc_only and not dc_s.is_gc():
2106 continue
2108 # DC (s) must be in the same site as the local DC
2109 # as this is the intra-site algorithm. This is
2110 # handled by virtue of placing DSAs in per
2111 # site objects (see enclosing for() loop)
2113 # This criterion is moot (a no-op) for this case
2114 # because we are scanning for (partial = True). The
2115 # MS algorithm statement says partial replica scans
2116 # should adhere to the "same" criteria as full replica
2117 # scans, so the criteria don't change here...it's just
2118 # rendered pointless.
2120 # The case that occurs here is a partial domain replica
2121 # being needed on a local DC that is a global catalog. There
2122 # is no minimum Windows behavior version for those since GCs
2123 # have always been present.
2124 if ro and not partial and nc_x.nc_type == NCType.domain:
2125 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
2126 continue
2128 # If we haven't been told to turn off stale connection
2129 # detection and this dsa has a stale connection then
2130 # continue
2131 if detect_stale and self.is_stale_link_connection(dc_s):
2132 continue
2134 # Replica meets criteria. Add it to table indexed
2135 # by the GUID of the DSA that it appears on
2136 r_list.append(p_of_x)
2138 # Append to R the NC replica that "should be present"
2139 # on the local DC
2140 r_list.append(l_of_x)
2142 r_list.sort(sort_replica_by_dsa_guid)
2143 DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr)) for x in r_list))
2144 r_len = len(r_list)
2146 max_node_edges = self.intrasite_max_node_edges(r_len)
2148 # Add a node for each r_list element to the replica graph
2149 graph_list = []
2150 for rep in r_list:
2151 node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
2152 graph_list.append(node)
2154 # For each r(i) from (0 <= i < |R|-1)
2155 i = 0
2156 while i < (r_len-1):
2157 # Add an edge from r(i) to r(i+1) if r(i) is a full
2158 # replica or r(i+1) is a partial replica
2159 if not r_list[i].is_partial() or r_list[i+1].is_partial():
2160 graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr)
2162 # Add an edge from r(i+1) to r(i) if r(i+1) is a full
2163 # replica or ri is a partial replica.
2164 if not r_list[i+1].is_partial() or r_list[i].is_partial():
2165 graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr)
2166 i = i + 1
2168 # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
2169 # or r0 is a partial replica.
2170 if not r_list[r_len-1].is_partial() or r_list[0].is_partial():
2171 graph_list[0].add_edge_from(r_list[r_len-1].rep_dsa_dnstr)
2173 # Add an edge from r0 to r|R|-1 if r0 is a full replica or
2174 # r|R|-1 is a partial replica.
2175 if not r_list[0].is_partial() or r_list[r_len-1].is_partial():
2176 graph_list[r_len-1].add_edge_from(r_list[0].rep_dsa_dnstr)
2178 if opts.verify or opts.dot_files:
2179 dot_edges = []
2180 dot_vertices = set()
2181 for v1 in graph_list:
2182 dot_vertices.add(v1.dsa_dnstr)
2183 for v2 in v1.edge_from:
2184 dot_edges.append((v2, v1.dsa_dnstr))
2185 dot_vertices.add(v2)
2187 verify_properties = ('connected', 'directed_double_ring')
2188 verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices,
2189 label='%s__%s__%s' % (site_local.site_dnstr, nc_x.nc_type, nc_x.nc_dnstr),
2190 properties=verify_properties, debug=DEBUG, verify=opts.verify,
2191 dot_files=opts.dot_files, directed=True)
2195 # For each existing nTDSConnection object implying an edge
2196 # from rj of R to ri such that j != i, an edge from rj to ri
2197 # is not already in the graph, and the total edges directed
2198 # to ri is less than n+2, the KCC adds that edge to the graph.
2199 i = 0
2200 while i < r_len:
2201 dsa = self.my_site.dsa_table[graph_list[i].dsa_dnstr]
2202 graph_list[i].add_edges_from_connections(dsa)
2203 i = i + 1
2204 DEBUG('reps are: %s' % ' '.join(x.rep_dsa_dnstr for x in r_list))
2205 DEBUG('dsas are: %s' % ' '.join(x.dsa_dnstr for x in graph_list))
2207 for tnode in graph_list:
2208 # To optimize replication latency in sites with many NC replicas, the
2209 # KCC adds new edges directed to ri to bring the total edges to n+2,
2210 # where the NC replica rk of R from which the edge is directed
2211 # is chosen at random such that k != i and an edge from rk to ri
2212 # is not already in the graph.
2214 # Note that the KCC tech ref does not give a number for the definition
2215 # of "sites with many NC replicas". At a bare minimum to satisfy
2216 # n+2 edges directed at a node we have to have at least three replicas
2217 # in |R| (i.e. if n is zero then replicas from at least two other
2218 # graph nodes must be able to direct edges to us).
2219 if r_len >= 3 and not tnode.has_sufficient_edges():
2220 candidates = [x for x in graph_list if (x is not tnode and
2221 x.dsa_dnstr not in tnode.edge_from)]
2223 DEBUG_BLUE("looking for random link for %s. r_len %d, graph len %d candidates %d"
2224 % (tnode.dsa_dnstr, r_len, len(graph_list), len(candidates)))
2226 DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates])
2228 while candidates and not tnode.has_sufficient_edges():
2229 other = random.choice(candidates)
2230 DEBUG("trying to add candidate %s" % other.dsa_dnstr)
2231 if not tnode.add_edge_from(other):
2232 DEBUG_RED("could not add %s" % other.dsa_dnstr)
2233 candidates.remove(other)
2234 else:
2235 DEBUG_CYAN("not adding links to %s: nodes %s, links is %s/%s" %
2236 (tnode.dsa_dnstr, r_len, len(tnode.edge_from), tnode.max_edges))
2239 # Print the graph node in debug mode
2240 logger.debug("%s" % tnode)
2242 # For each edge directed to the local DC, ensure a nTDSConnection
2243 # points to us that satisfies the KCC criteria
2245 if tnode.dsa_dnstr == dc_local.dsa_dnstr:
2246 tnode.add_connections_from_edges(dc_local)
2249 if opts.verify or opts.dot_files:
2250 dot_edges = []
2251 dot_vertices = set()
2252 for v1 in graph_list:
2253 dot_vertices.add(v1.dsa_dnstr)
2254 for v2 in v1.edge_from:
2255 dot_edges.append((v2, v1.dsa_dnstr))
2256 dot_vertices.add(v2)
2258 verify_properties = ('complete', 'connected', 'multi_edge_forest', 'forest', 'directed_double_ring')
2259 verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices,
2260 label='%s__%s__%s' % (site_local.site_dnstr, nc_x.nc_type, nc_x.nc_dnstr),
2261 properties=verify_properties, debug=DEBUG, verify=opts.verify,
2262 dot_files=opts.dot_files, directed=True)
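# Illustrative sketch only (not called by the KCC) of the ring-edge rule used
# above, applied to a toy list of is_partial flags (True = partial replica,
# False = full replica).  It returns (source_index, target_index) pairs; an
# edge source -> target is added around the ring whenever the source is a
# full replica or the target is a partial replica.
def _example_ring_edges(is_partial):
    edges = []
    r_len = len(is_partial)
    for i in range(r_len):
        j = (i + 1) % r_len               # next replica around the ring
        if not is_partial[i] or is_partial[j]:
            edges.append((i, j))          # r(i) -> r(i+1)
        if not is_partial[j] or is_partial[i]:
            edges.append((j, i))          # r(i+1) -> r(i)
    return edges
# e.g. _example_ring_edges([False, False, True]) produces every ring edge
# except the two directed from the partial replica to a full replica.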
2265 def intrasite(self):
2266 """The head method for generating the intra-site KCC replica
2267 connection graph and attendant nTDSConnection objects
2268 in the samdb
2270 # Retrieve my DSA
2271 mydsa = self.my_dsa
2273 logger.debug("intrasite(): enter")
2275 # Test whether local site has topology disabled
2276 mysite = self.site_table[str(self.my_site_guid)]
2277 if mysite.is_intrasite_topology_disabled():
2278 return
2280 detect_stale = (not mysite.is_detect_stale_disabled())
2281 for dnstr, connect in mydsa.connect_table.items():
2282 if connect.to_be_added:
2283 DEBUG_CYAN("TO BE ADDED:\n%s" % connect)
2285 # Loop thru all the partitions, with gc_only False
2286 for partdn, part in self.part_table.items():
2287 self.construct_intrasite_graph(mysite, mydsa, part, False,
2288 detect_stale)
2289 for dnstr, connect in mydsa.connect_table.items():
2290 if connect.to_be_added:
2291 DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
2294 # If the DC is a GC server, the KCC constructs an additional NC
2295 # replica graph (and creates nTDSConnection objects) for the
2296 # config NC as above, except that only NC replicas that "are present"
2297 # on GC servers are added to R.
2298 for dnstr, connect in mydsa.connect_table.items():
2299 if connect.to_be_added:
2300 DEBUG_YELLOW("TO BE ADDED:\n%s" % connect)
2302 # Do it again, with gc_only True
2303 for partdn, part in self.part_table.items():
2304 if part.is_config():
2305 self.construct_intrasite_graph(mysite, mydsa, part, True,
2306 detect_stale)
2308 # The DC repeats the NC replica graph computation and nTDSConnection
2309 # creation for each of the NC replica graphs, this time assuming
2310 # that no DC has failed. It does so by re-executing the steps as
2311 # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
2312 # set in the options attribute of the site settings object for
2313 # the local DC's site. (i.e. we set the "detect_stale" flag to False)
2314 for dnstr, connect in mydsa.connect_table.items():
2315 if connect.to_be_added:
2316 DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
2318 # Loop thru all the partitions.
2319 for partdn, part in self.part_table.items():
2320 self.construct_intrasite_graph(mysite, mydsa, part, False,
2321 False) # don't detect stale
2323 # If the DC is a GC server, the KCC constructs an additional NC
2324 # replica graph (and creates nTDSConnection objects) for the
2325 # config NC as above, except that only NC replicas that "are present"
2326 # on GC servers are added to R.
2327 for dnstr, connect in mydsa.connect_table.items():
2328 if connect.to_be_added:
2329 DEBUG_RED("TO BE ADDED:\n%s" % connect)
2331 for partdn, part in self.part_table.items():
2332 if part.is_config():
2333 self.construct_intrasite_graph(mysite, mydsa, part, True,
2334 False) # don't detect stale
2336 if opts.readonly:
2337 # Display any connections that are to be deleted, modified or added
2338 for dnstr, connect in mydsa.connect_table.items():
2339 if connect.to_be_deleted:
2340 logger.info("TO BE DELETED:\n%s" % connect)
2341 if connect.to_be_modified:
2342 logger.info("TO BE MODIFIED:\n%s" % connect)
2343 if connect.to_be_added:
2344 DEBUG_GREEN("TO BE ADDED:\n%s" % connect)
2346 mydsa.commit_connections(self.samdb, ro=True)
2347 else:
2348 # Commit any newly created connections to the samdb
2349 mydsa.commit_connections(self.samdb)
2352 def run(self, dburl, lp, creds, forced_local_dsa=None):
2353 """Method to perform a complete run of the KCC and
2354 produce an updated topology for subsequent NC replica
2355 synchronization between domain controllers
2357 # We may already have a samdb setup if we are
2358 # currently importing an ldif for a test run
2359 if self.samdb is None:
2360 try:
2361 self.samdb = SamDB(url=dburl,
2362 session_info=system_session(),
2363 credentials=creds, lp=lp)
2365 except ldb.LdbError, (num, msg):
2366 logger.error("Unable to open sam database %s : %s" %
2367 (dburl, msg))
2368 return 1
2370 if forced_local_dsa:
2371 self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % forced_local_dsa)
2373 try:
2374 # Setup
2375 self.load_my_site()
2376 self.load_my_dsa()
2378 self.load_all_sites()
2379 self.load_all_partitions()
2380 self.load_all_transports()
2381 self.load_all_sitelinks()
2383 if opts.verify or opts.dot_files:
2385 dot_edges = []
2386 for site in self.site_table.values():
2387 for dsa in site.dsa_table.values():
2388 for con in dsa.connect_table.values():
2389 dot_edges.append((dsa.dsa_dnstr, con.from_dnstr))
2390 verify_and_dot('dsa_initial', dot_edges, label=self.my_dsa_dnstr,
2391 properties=(), debug=DEBUG, verify=opts.verify,
2392 dot_files=opts.dot_files, directed=True)
2394 dot_edges = []
2395 for site in self.site_table.values():
2396 for dsa in site.dsa_table.values():
2397 c_rep = get_dsa_config_rep(dsa)
2398 c_rep.load_repsFrom(self.samdb)
2399 for x in c_rep.rep_repsFrom:
2400 #print dir(x)
2401 dot_edges.append((c_rep.rep_dsa_dnstr, x.nc_dnstr))
2403 verify_and_dot('config_repsFrom_initial', dot_edges, directed=True, label=self.my_dsa_dnstr,
2404 properties=(), debug=DEBUG, verify=opts.verify,
2405 dot_files=opts.dot_files)
2407 dot_edges = []
2408 for site in self.site_table.values():
2409 for dsa in site.dsa_table.values():
2410 for x in dsa.current_rep_table:
2411 dot_edges.append((dsa.dsa_dnstr, x))
2413 verify_and_dot('dsa_repsFrom_initial', dot_edges, directed=True, label=self.my_dsa_dnstr,
2414 properties=(), debug=DEBUG, verify=opts.verify,
2415 dot_files=opts.dot_files)
2417 dot_edges = []
2418 for link in self.sitelink_table.values():
2419 for a, b in itertools.combinations(link.site_list, 2):
2420 dot_edges.append((str(a), str(b)))
2421 verify_properties = ('connected',)
2422 verify_and_dot('dsa_sitelink_initial', dot_edges, directed=False, label=self.my_dsa_dnstr,
2423 properties=verify_properties, debug=DEBUG, verify=opts.verify,
2424 dot_files=opts.dot_files)
2428 # These are the published steps (in order) for the
2429 # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)
2431 # Step 1
2432 self.refresh_failed_links_connections()
2434 # Step 2
2435 self.intrasite()
2437 # Step 3
2438 all_connected = self.intersite()
2440 # Step 4
2441 self.remove_unneeded_ntdsconn(all_connected)
2443 # Step 5
2444 self.translate_ntdsconn()
2446 # Step 6
2447 self.remove_unneeded_failed_links_connections()
2449 # Step 7
2450 self.update_rodc_connection()
2453 if opts.verify or opts.dot_files:
2454 dot_edges = []
2455 for site in self.site_table.values():
2456 for dsa in site.dsa_table.values():
2457 for con in dsa.connect_table.values():
2458 dot_edges.append((dsa.dsa_dnstr, con.from_dnstr))
2459 verify_properties = ('complete', 'connected', 'multi_edge_forest', 'forest', 'directed_double_ring')
2460 verify_and_dot('dsa_final', dot_edges, label=self.my_dsa_dnstr,
2461 properties=verify_properties, debug=DEBUG, verify=opts.verify,
2462 dot_files=opts.dot_files, directed=True)
2464 except:
2465 raise
2467 return 0
2469 def import_ldif(self, dburl, lp, creds, ldif_file):
2470 """Routine to import all objects and attributes that are relevant
2471 to the KCC algorithms from a previously exported LDIF file.
2473 The point of this function is to allow a programmer/debugger to
2474 import an LDIF file with non-security relevant information that
2475 was previously extracted from a DC database. The LDIF file is used
2476 to create a temporary abbreviated database. The KCC algorithm can
2477 then run against this abbreviated database for debug or test
2478 verification that the topology generated is computationally the
2479 same between different OSes and algorithms.
2481 :param dburl: path to the temporary abbreviated db to create
2482 :param ldif_file: path to the ldif file to import
2484 if os.path.exists(dburl):
2485 logger.error("Specify a database (%s) that doesn't already exist." %
2486 dburl)
2487 return 1
2489 # Use ["modules:"] as we are attempting to build a sam
2490 # database as opposed to start it here.
2491 self.samdb = Ldb(url=dburl, session_info=system_session(),
2492 lp=lp, options=["modules:"])
2494 self.samdb.transaction_start()
2495 try:
2496 data = read_and_sub_file(ldif_file, None)
2497 self.samdb.add_ldif(data, None)
2498 if opts.forced_local_dsa:
2499 self.samdb.modify_ldif("""dn: @ROOTDSE
2500 changetype: modify
2501 replace: dsServiceName
2502 dsServiceName: CN=NTDS Settings,%s
2504 """ % opts.forced_local_dsa)
2506 except Exception, estr:
2507 logger.error("Failed to import %s: %s" % (ldif_file, estr))
2508 self.samdb.transaction_cancel()
2509 return 1
2510 else:
2511 self.samdb.transaction_commit()
2513 self.samdb = None
2515 # We have an abbreviated list of options here because we have built
2516 # an abbreviated database. We use the rootdse and extended-dn
2517 # modules only during this re-open
2518 self.samdb = SamDB(url=dburl, session_info=system_session(),
2519 credentials=creds, lp=lp,
2520 options=["modules:rootdse,extended_dn_in,extended_dn_out_ldb"])
2521 return 0
2523 def export_ldif(self, dburl, lp, creds, ldif_file):
2524 """Routine to extract all objects and attributes that are relevant
2525 to the KCC algorithms from a DC database.
2527 The point of this function is to allow a programmer/debugger to
2528 extract an LDIF file with non-security relevant information from
2529 a DC database. The LDIF file can then be used to "import" via
2530 the import_ldif() function this file into a temporary abbreviated
2531 database. The KCC algorithm can then run against this abbreviated
2532 database for debug or test verification that the topology generated
2533 is computationally the same between different OSes and algorithms.
2535 :param dburl: LDAP database URL to extract info from
2536 :param ldif_file: output LDIF file name to create
2538 try:
2539 self.samdb = SamDB(url=dburl,
2540 session_info=system_session(),
2541 credentials=creds, lp=lp)
2542 except ldb.LdbError, (enum, estr):
2543 logger.error("Unable to open sam database (%s) : %s" %
2544 (dburl, estr))
2545 return 1
2547 if os.path.exists(ldif_file):
2548 logger.error("Specify a file (%s) that doesn't already exist." %
2549 ldif_file)
2550 return 1
2552 try:
2553 f = open(ldif_file, "w")
2554 except IOError as ioerr:
2555 logger.error("Unable to open (%s) : %s" % (ldif_file, str(ioerr)))
2556 return 1
2558 try:
2559 # Query Partitions
2560 attrs = [ "objectClass",
2561 "objectGUID",
2562 "cn",
2563 "whenChanged",
2564 "objectSid",
2565 "Enabled",
2566 "systemFlags",
2567 "dnsRoot",
2568 "nCName",
2569 "msDS-NC-Replica-Locations",
2570 "msDS-NC-RO-Replica-Locations" ]
2572 sstr = "CN=Partitions,%s" % self.samdb.get_config_basedn()
2573 res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
2574 attrs=attrs,
2575 expression="(objectClass=crossRef)")
2577 # Write partitions output
2578 write_search_result(self.samdb, f, res)
2580 # Query cross reference container
2581 attrs = [ "objectClass",
2582 "objectGUID",
2583 "cn",
2584 "whenChanged",
2585 "fSMORoleOwner",
2586 "systemFlags",
2587 "msDS-Behavior-Version",
2588 "msDS-EnabledFeature" ]
2590 sstr = "CN=Partitions,%s" % self.samdb.get_config_basedn()
2591 res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
2592 attrs=attrs,
2593 expression="(objectClass=crossRefContainer)")
2595 # Write cross reference container output
2596 write_search_result(self.samdb, f, res)
2598 # Query Sites
2599 attrs = [ "objectClass",
2600 "objectGUID",
2601 "cn",
2602 "whenChanged",
2603 "systemFlags" ]
2605 sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
2606 sites = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
2607 attrs=attrs,
2608 expression="(objectClass=site)")
2610 # Write sites output
2611 write_search_result(self.samdb, f, sites)
2613 # Query NTDS Site Settings
2614 for msg in sites:
2615 sitestr = str(msg.dn)
2617 attrs = [ "objectClass",
2618 "objectGUID",
2619 "cn",
2620 "whenChanged",
2621 "interSiteTopologyGenerator",
2622 "interSiteTopologyFailover",
2623 "schedule",
2624 "options" ]
2626 sstr = "CN=NTDS Site Settings,%s" % sitestr
2627 res = self.samdb.search(base=sstr, scope=ldb.SCOPE_BASE,
2628 attrs=attrs)
2630 # Write Site Settings output
2631 write_search_result(self.samdb, f, res)
2633 # Naming context list
2634 nclist = []
2636 # Query Directory Service Agents
2637 for msg in sites:
2638 sstr = str(msg.dn)
2640 ncattrs = [ "hasMasterNCs",
2641 "msDS-hasMasterNCs",
2642 "hasPartialReplicaNCs",
2643 "msDS-HasDomainNCs",
2644 "msDS-hasFullReplicaNCs",
2645 "msDS-HasInstantiatedNCs" ]
2646 attrs = [ "objectClass",
2647 "objectGUID",
2648 "cn",
2649 "whenChanged",
2650 "invocationID",
2651 "options",
2652 "msDS-isRODC",
2653 "msDS-Behavior-Version" ]
2655 res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
2656 attrs=attrs + ncattrs,
2657 expression="(objectClass=nTDSDSA)")
2659 # Spin thru all the DSAs looking for NC replicas
2660 # and build a list of all possible Naming Contexts
2661 # for subsequent retrieval below
2662 for msg in res:
2663 for k in msg.keys():
2664 if k in ncattrs:
2665 for value in msg[k]:
2666 # Some of these have binary DNs so
2667 # use dsdb_Dn to split out relevant parts
2668 dsdn = dsdb_Dn(self.samdb, value)
2669 dnstr = str(dsdn.dn)
2670 if dnstr not in nclist:
2671 nclist.append(dnstr)
2673 # Write DSA output
2674 write_search_result(self.samdb, f, res)
2676 # Query NTDS Connections
2677 for msg in sites:
2678 sstr = str(msg.dn)
2680 attrs = [ "objectClass",
2681 "objectGUID",
2682 "cn",
2683 "whenChanged",
2684 "options",
2685 "whenCreated",
2686 "enabledConnection",
2687 "schedule",
2688 "transportType",
2689 "fromServer",
2690 "systemFlags" ]
2692 res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
2693 attrs=attrs,
2694 expression="(objectClass=nTDSConnection)")
2695 # Write NTDS Connection output
2696 write_search_result(self.samdb, f, res)
2699 # Query Intersite transports
2700 attrs = [ "objectClass",
2701 "objectGUID",
2702 "cn",
2703 "whenChanged",
2704 "options",
2705 "name",
2706 "bridgeheadServerListBL",
2707 "transportAddressAttribute" ]
2709 sstr = "CN=Inter-Site Transports,CN=Sites,%s" % \
2710 self.samdb.get_config_basedn()
2711 res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
2712 attrs=attrs,
2713 expression="(objectClass=interSiteTransport)")
2715 # Write inter-site transport output
2716 write_search_result(self.samdb, f, res)
2718 # Query siteLink
2719 attrs = [ "objectClass",
2720 "objectGUID",
2721 "cn",
2722 "whenChanged",
2723 "systemFlags",
2724 "options",
2725 "schedule",
2726 "replInterval",
2727 "siteList",
2728 "cost" ]
2730 sstr = "CN=Sites,%s" % \
2731 self.samdb.get_config_basedn()
2732 res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
2733 attrs=attrs,
2734 expression="(objectClass=siteLink)",
2735 controls=['extended_dn:0'])
2737 # Write siteLink output
2738 write_search_result(self.samdb, f, res)
2740 # Query siteLinkBridge
2741 attrs = [ "objectClass",
2742 "objectGUID",
2743 "cn",
2744 "whenChanged",
2745 "siteLinkList" ]
2747 sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
2748 res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
2749 attrs=attrs,
2750 expression="(objectClass=siteLinkBridge)")
2752 # Write siteLinkBridge output
2753 write_search_result(self.samdb, f, res)
2755 # Query servers containers
2756 # Needed for samdb.server_site_name()
2757 attrs = [ "objectClass",
2758 "objectGUID",
2759 "cn",
2760 "whenChanged",
2761 "systemFlags" ]
2763 sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
2764 res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
2765 attrs=attrs,
2766 expression="(objectClass=serversContainer)")
2768 # Write servers container output
2769 write_search_result(self.samdb, f, res)
2771 # Query servers
2772 # Needed because some transport interfaces refer back to
2773 # attributes found in the server object. Also needed
2774 # so extended-dn will be happy with dsServiceName in rootDSE
2775 attrs = [ "objectClass",
2776 "objectGUID",
2777 "cn",
2778 "whenChanged",
2779 "systemFlags",
2780 "dNSHostName",
2781 "mailAddress" ]
2783 sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
2784 res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
2785 attrs=attrs,
2786 expression="(objectClass=server)")
2788 # Write server output
2789 write_search_result(self.samdb, f, res)
2791 # Query Naming Context replicas
2792 attrs = [ "objectClass",
2793 "objectGUID",
2794 "cn",
2795 "whenChanged",
2796 "objectSid",
2797 "fSMORoleOwner",
2798 "msDS-Behavior-Version",
2799 "repsFrom",
2800 "repsTo" ]
2802 for sstr in nclist:
2803 res = self.samdb.search(sstr, scope=ldb.SCOPE_BASE,
2804 attrs=attrs)
2806 # Write naming context output
2807 write_search_result(self.samdb, f, res)
2809 # Query rootDSE replicas
2810 attrs=[ "objectClass",
2811 "objectGUID",
2812 "cn",
2813 "whenChanged",
2814 "rootDomainNamingContext",
2815 "configurationNamingContext",
2816 "schemaNamingContext",
2817 "defaultNamingContext",
2818 "dsServiceName" ]
2820 sstr = ""
2821 res = self.samdb.search(sstr, scope=ldb.SCOPE_BASE,
2822 attrs=attrs)
2824 # Record the rootDSE object as a dn as it
2825 # would appear in the base ldb file. We have
2826 # to save it this way because we are going to
2827 # be importing as an abbreviated database.
2828 res[0].dn = ldb.Dn(self.samdb, "@ROOTDSE")
2830 # Write rootdse output
2831 write_search_result(self.samdb, f, res)
2833 except ldb.LdbError, (enum, estr):
2834 logger.error("Error processing (%s) : %s" % (sstr, estr))
2835 return 1
2837 f.close()
2838 return 0
2840 ##################################################
2841 # Global Functions
2842 ##################################################
2843 def sort_replica_by_dsa_guid(rep1, rep2):
2844 return cmp(ndr_pack(rep1.rep_dsa_guid), ndr_pack(rep2.rep_dsa_guid))
2846 def sort_dsa_by_gc_and_guid(dsa1, dsa2):
2847 if dsa1.is_gc() and not dsa2.is_gc():
2848 return -1
2849 if not dsa1.is_gc() and dsa2.is_gc():
2850 return +1
2851 return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid))
2853 def is_smtp_replication_available():
2854 """Currently always returns false because Samba
2855 doesn't implement SMTP transfer for NC changes
2856 between DCs
2858 return False
2860 def write_search_result(samdb, f, res):
2861 for msg in res:
2862 lstr = samdb.write_ldif(msg, ldb.CHANGETYPE_NONE)
2863 f.write("%s" % lstr)
2865 def create_edge(con_type, site_link, guid_to_vertex):
2866 e = MultiEdge()
2867 e.site_link = site_link
2868 e.vertices = []
2869 for site_guid in site_link.site_list:
2870 if str(site_guid) in guid_to_vertex:
2871 e.vertices.extend(guid_to_vertex.get(str(site_guid)))
2872 e.repl_info.cost = site_link.cost
2873 e.repl_info.options = site_link.options
2874 e.repl_info.interval = site_link.interval
2875 e.repl_info.schedule = site_link.schedule
2876 e.con_type = con_type
2877 e.directed = False
2878 return e
2880 def create_auto_edge_set(graph, transport):
2881 e_set = MultiEdgeSet()
2882 e_set.guid = misc.GUID() # NULL guid, not associated with a SiteLinkBridge object
2883 for site_link in graph.edges:
2884 if site_link.con_type == transport:
2885 e_set.edges.append(site_link)
2887 return e_set
2889 def create_edge_set(graph, transport, site_link_bridge):
2890 # TODO not implemented - need to store all site link bridges
2891 e_set = MultiEdgeSet()
2892 # e_set.guid = site_link_bridge
2893 return e_set
2895 def setup_vertices(graph):
2896 for v in graph.vertices:
2897 if v.is_white():
2898 v.repl_info.cost = MAX_DWORD
2899 v.root = None
2900 v.component_id = None
2901 else:
2902 v.repl_info.cost = 0
2903 v.root = v
2904 v.component_id = v
2906 v.repl_info.interval = 0
2907 v.repl_info.options = 0xFFFFFFFF
2908 v.repl_info.schedule = None # TODO highly suspicious
2909 v.demoted = False
2911 def dijkstra(graph, edge_type, include_black):
2912 queue = []
2913 setup_dijkstra(graph, edge_type, include_black, queue)
2914 while len(queue) > 0:
2915 cost, guid, vertex = heapq.heappop(queue)
2916 for edge in vertex.edges:
2917 for v in edge.vertices:
2918 if v is not vertex:
2919 # add new path from vertex to v
2920 try_new_path(graph, queue, vertex, edge, v)
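# A generic, self-contained illustration (not used above) of the heapq
# pattern that dijkstra() and try_new_path() rely on: pop the cheapest entry,
# relax its neighbours and push improved costs back onto the heap.  'graph'
# here is a hypothetical plain dict of node -> [(neighbour, cost), ...]; the
# KCC version instead seeds every coloured vertex at once and orders heap
# entries as (cost, guid, vertex) tuples.
def _example_dijkstra(graph, seeds):
    dist = dict((node, float('inf')) for node in graph)
    queue = []
    for seed in seeds:
        dist[seed] = 0
        heapq.heappush(queue, (0, seed))
    while queue:
        cost, node = heapq.heappop(queue)
        if cost > dist[node]:
            continue                      # stale queue entry, already improved
        for neighbour, weight in graph[node]:
            new_cost = cost + weight
            if new_cost < dist[neighbour]:
                dist[neighbour] = new_cost
                heapq.heappush(queue, (new_cost, neighbour))
    return dist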
2922 def setup_dijkstra(graph, edge_type, include_black, queue):
2923 setup_vertices(graph)
2924 for vertex in graph.vertices:
2925 if vertex.is_white():
2926 continue
2928 if ((vertex.is_black() and not include_black)
2929 or edge_type not in vertex.accept_black
2930 or edge_type not in vertex.accept_red_red):
2931 vertex.repl_info.cost = MAX_DWORD
2932 vertex.root = None # NULL GUID
2933 vertex.demoted = True # Demoted appears not to be used
2934 else:
2935 heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
2937 def try_new_path(graph, queue, vfrom, edge, vto):
2938 newRI = ReplInfo()
2939 # What this function checks is that there is a valid time frame for
2940 # which replication can actually occur, despite being adequately
2941 # connected
2942 intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI)
2944 # If the new path costs more than the current, then ignore the edge
2945 if newRI.cost > vto.repl_info.cost:
2946 return
2948 if newRI.cost < vto.repl_info.cost and not intersect:
2949 return
2951 new_duration = total_schedule(newRI.schedule)
2952 old_duration = total_schedule(vto.repl_info.schedule)
2954 # Cheaper or longer schedule
2955 if newRI.cost < vto.repl_info.cost or new_duration > old_duration:
2956 vto.root = vfrom.root
2957 vto.component_id = vfrom.component_id
2958 vto.repl_info = newRI
2959 heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
2961 def check_demote_vertex(vertex, edge_type):
2962 if vertex.is_white():
2963 return
2965 # Accepts neither red-red nor black edges, demote
2966 if edge_type not in vertex.accept_black and edge_type not in vertex.accept_red_red:
2967 vertex.repl_info.cost = MAX_DWORD
2968 vertex.root = None
2969 vertex.demoted = True # Demoted appears not to be used
2971 def undemote_vertex(vertex):
2972 if vertex.is_white():
2973 return
2975 vertex.repl_info.cost = 0
2976 vertex.root = vertex
2977 vertex.demoted = False
2979 def process_edge_set(graph, e_set, internal_edges):
2980 if e_set is None:
2981 for edge in graph.edges:
2982 for vertex in edge.vertices:
2983 check_demote_vertex(vertex, edge.con_type)
2984 process_edge(graph, edge, internal_edges)
2985 for vertex in edge.vertices:
2986 undemote_vertex(vertex)
2987 else:
2988 for edge in e_set.edges:
2989 process_edge(graph, edge, internal_edges)
2991 def process_edge(graph, examine, internal_edges):
2992 # Find the set of all vertices that touch the edge being examined
2993 vertices = []
2994 for v in examine.vertices:
2995 # Append a 4-tuple of color, repl cost, guid and vertex
2996 vertices.append((v.color, v.repl_info.cost, v.guid, v))
2998 # Sort ascending: by color first, then cost, then guid
2998 vertices.sort()
3000 color, cost, guid, bestv = vertices[0]
3001 # Add to internal edges an edge from every colored vertex to bestV
3002 for v in examine.vertices:
3003 if v.component_id is None or v.root is None:
3004 continue
3006 # Only add edge if valid inter-tree edge - needs a root and
3007 # different components
3008 if (bestv.component_id is not None and bestv.root is not None
3009 and v.component_id is not None and v.root is not None and
3010 bestv.component_id != v.component_id):
3011 add_int_edge(graph, internal_edges, examine, bestv, v)
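# Illustrative reduction (not called above) of process_edge(): pick the
# "best" endpoint of a multi-edge by lexicographic (color, cost, guid) order,
# then propose an inter-tree edge from every other endpoint whose
# spanning-tree component differs from the best one's.  Vertices here are
# hypothetical (color, cost, guid, component_id) tuples.
def _example_process_edge(vertices):
    best = min(vertices, key=lambda v: v[:3])   # color, then cost, then guid
    proposals = []
    for v in vertices:
        if v is best:
            continue
        if v[3] is not None and best[3] is not None and v[3] != best[3]:
            proposals.append((best, v))         # candidate internal edge
    return proposals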
3013 # Add an internal edge. Its endpoints are the roots of the vertices passed in, and are always colored
3014 def add_int_edge(graph, internal_edges, examine, v1, v2):
3015 root1 = v1.root
3016 root2 = v2.root
3018 red_red = False
3019 if root1.is_red() and root2.is_red():
3020 red_red = True
3022 if red_red:
3023 if (examine.con_type not in root1.accept_red_red
3024 or examine.con_type not in root2.accept_red_red):
3025 return
3026 else:
3027 if (examine.con_type not in root1.accept_black
3028 or examine.con_type not in root2.accept_black):
3029 return
3031 ri = ReplInfo()
3032 ri2 = ReplInfo()
3034 # Create the transitive replInfo for the two trees and this edge
3035 if not combine_repl_info(v1.repl_info, v2.repl_info, ri):
3036 return
3037 # ri is now initialized
3038 if not combine_repl_info(ri, examine.repl_info, ri2):
3039 return
3041 newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type)
3042 # Order by vertex guid
3043 #XXX guid comparison using ndr_pack
3044 if newIntEdge.v1.ndrpacked_guid > newIntEdge.v2.ndrpacked_guid:
3045 newIntEdge.v1 = root2
3046 newIntEdge.v2 = root1
3048 internal_edges.add(newIntEdge)
3050 def kruskal(graph, edges):
3051 for v in graph.vertices:
3052 v.edges = []
3054 components = set([x for x in graph.vertices if not x.is_white()])
3055 edges = list(edges)
3057 # Sort using InternalEdge's own comparison function
3058 edges.sort()
3060 expected_num_tree_edges = 0 # TODO this value makes little sense
3062 count_edges = 0
3063 output_edges = []
3064 index = 0
3065 while index < len(edges): # TODO and num_components > 1
3066 e = edges[index]
3067 parent1 = find_component(e.v1)
3068 parent2 = find_component(e.v2)
3069 if parent1 is not parent2:
3070 count_edges += 1
3071 add_out_edge(graph, output_edges, e)
3072 parent1.component_id = parent2
3073 components.discard(parent1)
3075 index += 1
3077 return output_edges, len(components)
3079 def find_component(vertex):
3080 if vertex.component_id is vertex:
3081 return vertex
3083 current = vertex
3084 while current.component_id is not current:
3085 current = current.component_id
3087 root = current
3088 current = vertex
3089 while current.component_id is not root:
3090 n = current.component_id
3091 current.component_id = root
3092 current = n
3094 return root
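# Stand-alone sketch (not used above) of the union-find pattern behind
# kruskal() and find_component(): every node starts as its own component,
# find() compresses paths while walking to the root, and each accepted edge
# merges two components.  Edges here are hypothetical (cost, a, b) tuples.
def _example_kruskal(nodes, edges):
    parent = dict((n, n) for n in nodes)

    def find(n):
        while parent[n] != n:
            parent[n] = parent[parent[n]]   # path compression
            n = parent[n]
        return n

    tree = []
    for cost, a, b in sorted(edges):        # cheapest edges first
        root_a, root_b = find(a), find(b)
        if root_a != root_b:
            parent[root_a] = root_b         # merge the two components
            tree.append((cost, a, b))
    return tree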
3096 def add_out_edge(graph, output_edges, e):
3097 v1 = e.v1
3098 v2 = e.v2
3100 # This multi-edge is a 'real' edge with no GUID
3101 ee = MultiEdge()
3102 ee.directed = False
3103 ee.vertices.append(v1)
3104 ee.vertices.append(v2)
3105 ee.con_type = e.e_type
3106 ee.repl_info = e.repl_info
3107 output_edges.append(ee)
3109 v1.edges.append(ee)
3110 v2.edges.append(ee)
3114 ##################################################
3115 # samba_kcc entry point
3116 ##################################################
3118 parser = optparse.OptionParser("samba_kcc [options]")
3119 sambaopts = options.SambaOptions(parser)
3120 credopts = options.CredentialsOptions(parser)
3122 parser.add_option_group(sambaopts)
3123 parser.add_option_group(credopts)
3124 parser.add_option_group(options.VersionOptions(parser))
3126 parser.add_option("--readonly", default=False,
3127 help="compute topology but do not update database",
3128 action="store_true")
3130 parser.add_option("--debug",
3131 help="debug output",
3132 action="store_true")
3134 parser.add_option("--verify",
3135 help="verify that assorted invariants are kept",
3136 action="store_true")
3138 parser.add_option("--list-verify-tests",
3139 help="list what verification actions are available and do nothing else",
3140 action="store_true")
3142 parser.add_option("--no-dot-files", dest='dot_files',
3143 help="Don't write dot graph files in /tmp",
3144 default=True, action="store_false")
3146 parser.add_option("--seed",
3147 help="random number seed",
3148 type=int)
3150 parser.add_option("--importldif",
3151 help="import topology ldif file",
3152 type=str, metavar="<file>")
3154 parser.add_option("--exportldif",
3155 help="export topology ldif file",
3156 type=str, metavar="<file>")
3158 parser.add_option("-H", "--URL" ,
3159 help="LDB URL for database or target server",
3160 type=str, metavar="<URL>", dest="dburl")
3162 parser.add_option("--tmpdb",
3163 help="schemaless database file to create for ldif import",
3164 type=str, metavar="<file>")
3166 parser.add_option("--now",
3167 help="assume current time is this ('YYYYmmddHHMMSS[tz]', default: system time)",
3168 type=str, metavar="<date>")
3170 parser.add_option("--forced-local-dsa",
3171 help="run calculations assuming the DSA is this DN",
3172 type=str, metavar="<DSA>")
3175 logger = logging.getLogger("samba_kcc")
3176 logger.addHandler(logging.StreamHandler(sys.stdout))
3177 DEBUG = logger.debug
3179 from functools import partial
3180 def _colour_debug(*args, **kwargs):
3181 DEBUG('%s%s%s' % (kwargs['colour'], args[0], C_NORMAL), *args[1:])
3183 _globals = globals()
3184 for _colour in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW',
3185 'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA',
3186 'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'):
3187 _globals['DEBUG_' + _colour] = partial(_colour_debug, colour=_globals[_colour])
3190 lp = sambaopts.get_loadparm()
3191 creds = credopts.get_credentials(lp, fallback_machine=True)
3193 opts, args = parser.parse_args()
3195 if opts.list_verify_tests:
3196 list_verify_tests()
3197 sys.exit(0)
3199 if opts.debug:
3200 logger.setLevel(logging.DEBUG)
3201 elif opts.readonly:
3202 logger.setLevel(logging.INFO)
3203 else:
3204 logger.setLevel(logging.WARNING)
3206 # initialize seed from optional input parameter
3207 if opts.seed:
3208 random.seed(opts.seed)
3209 else:
3210 random.seed(0xACE5CA11)
3212 if opts.dburl is None:
3213 opts.dburl = lp.samdb_url()
3215 if opts.now:
3216 for timeformat in ("%Y%m%d%H%M%S%Z", "%Y%m%d%H%M%S"):
3217 try:
3218 now_tuple = time.strptime(opts.now, timeformat)
3219 break
3220 except ValueError:
3221 pass
3222 else:
3223 # the else clause runs only if the loop never hit break --> no format matched
3224 print >> sys.stderr, "could not parse time '%s'" % opts.now
3225 sys.exit(1)
3227 unix_now = int(time.mktime(now_tuple))
3228 else:
3229 unix_now = int(time.time())
3232 # Instantiate Knowledge Consistency Checker and perform run
3233 kcc = KCC()
3235 if opts.exportldif:
3236 rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)
3237 sys.exit(rc)
3239 if opts.importldif:
3240 if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
3241 logger.error("Specify a target temp database file with --tmpdb option.")
3242 sys.exit(1)
3244 rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)
3245 if rc != 0:
3246 sys.exit(rc)
3248 rc = kcc.run(opts.dburl, lp, creds, opts.forced_local_dsa)
3249 sys.exit(rc)