samba_kcc: Add basic skeleton for KCC intersite algorithm
[Samba.git] / source4 / scripting / bin / samba_kcc
blobf57884c6b3a39f13b60d0ba87370fe2e3b59bff5
1 #!/usr/bin/env python
3 # Compute our KCC topology
5 # Copyright (C) Dave Craft 2011
7 # This program is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 3 of the License, or
10 # (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
17 # You should have received a copy of the GNU General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 import os
21 import sys
22 import random
# ensure we get messages out immediately, so they get in the samba logs,
# and don't get swallowed by a timeout
os.environ['PYTHONUNBUFFERED'] = '1'

# forcing GMT avoids a problem in some timezones with kerberos. Both MIT
# and Heimdal can get mutual authentication errors due to the 24 second
# difference between UTC and GMT when using some zone files (eg. the PDT
# zone from the US)
os.environ["TZ"] = "GMT"

# Find right directory when running from source tree
sys.path.insert(0, "bin/python")
import heapq
import logging
import optparse
import time

from samba import (
    getopt as options,
    Ldb,
    ldb,
    dsdb,
    read_and_sub_file,
    drs_utils,
    nttime2unix)
from samba.auth import system_session
from samba.samdb import SamDB
from samba.dcerpc import drsuapi, misc
from samba.kcc_utils import *
55 class KCC(object):
56 """The Knowledge Consistency Checker class.
58 A container for objects and methods allowing a run of the KCC. Produces a
59 set of connections in the samdb for which the Distributed Replication
60 Service can then utilize to replicate naming contexts
61 """
62 def __init__(self):
63 """Initializes the partitions class which can hold
64 our local DCs partitions or all the partitions in
65 the forest
66 """
67 self.part_table = {} # partition objects
68 self.site_table = {}
69 self.transport_table = {}
70 self.sitelink_table = {}
72 # TODO: These should be backed by a 'permanent' store so that when
73 # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
74 # the failure information can be returned
75 self.kcc_failed_links = {}
76 self.kcc_failed_connections = set()
78 # Used in inter-site topology computation. A list
79 # of connections (by NTDSConnection object) that are
80 # to be kept when pruning un-needed NTDS Connections
81 self.keep_connection_list = []
83 self.my_dsa_dnstr = None # My dsa DN
84 self.my_dsa = None # My dsa object
86 self.my_site_dnstr = None
87 self.my_site = None
89 self.samdb = None
91 def load_all_transports(self):
92 """Loads the inter-site transport objects for Sites
94 ::returns: Raises an Exception on error
95 """
96 try:
97 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
98 self.samdb.get_config_basedn(),
99 scope=ldb.SCOPE_SUBTREE,
100 expression="(objectClass=interSiteTransport)")
101 except ldb.LdbError, (enum, estr):
102 raise Exception("Unable to find inter-site transports - (%s)" %
103 estr)
105 for msg in res:
106 dnstr = str(msg.dn)
108 # already loaded
109 if dnstr in self.transport_table.keys():
110 continue
112 transport = Transport(dnstr)
114 transport.load_transport(self.samdb)
116 # Assign this transport to table
117 # and index by dn
118 self.transport_table[dnstr] = transport
120 def load_all_sitelinks(self):
121 """Loads the inter-site siteLink objects
123 ::returns: Raises an Exception on error
125 try:
126 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
127 self.samdb.get_config_basedn(),
128 scope=ldb.SCOPE_SUBTREE,
129 expression="(objectClass=siteLink)")
130 except ldb.LdbError, (enum, estr):
131 raise Exception("Unable to find inter-site siteLinks - (%s)" % estr)
133 for msg in res:
134 dnstr = str(msg.dn)
136 # already loaded
137 if dnstr in self.sitelink_table.keys():
138 continue
140 sitelink = SiteLink(dnstr)
142 sitelink.load_sitelink(self.samdb)
144 # Assign this siteLink to table
145 # and index by dn
146 self.sitelink_table[dnstr] = sitelink
148 def get_sitelink(self, site1_dnstr, site2_dnstr):
149 """Return the siteLink (if it exists) that connects the
150 two input site DNs
152 for sitelink in self.sitelink_table.values():
153 if sitelink.is_sitelink(site1_dnstr, site2_dnstr):
154 return sitelink
155 return None
157 def load_my_site(self):
158 """Loads the Site class for the local DSA
160 ::returns: Raises an Exception on error
162 self.my_site_dnstr = "CN=%s,CN=Sites,%s" % (
163 self.samdb.server_site_name(),
164 self.samdb.get_config_basedn())
165 site = Site(self.my_site_dnstr)
166 site.load_site(self.samdb)
168 self.site_table[self.my_site_dnstr] = site
169 self.my_site = site
171 def load_all_sites(self):
172 """Discover all sites and instantiate and load each
173 NTDS Site settings.
175 ::returns: Raises an Exception on error
177 try:
178 res = self.samdb.search("CN=Sites,%s" %
179 self.samdb.get_config_basedn(),
180 scope=ldb.SCOPE_SUBTREE,
181 expression="(objectClass=site)")
182 except ldb.LdbError, (enum, estr):
183 raise Exception("Unable to find sites - (%s)" % estr)
185 for msg in res:
186 sitestr = str(msg.dn)
188 # already loaded
189 if sitestr in self.site_table.keys():
190 continue
192 site = Site(sitestr)
193 site.load_site(self.samdb)
195 self.site_table[sitestr] = site
197 def load_my_dsa(self):
198 """Discover my nTDSDSA dn thru the rootDSE entry
200 ::returns: Raises an Exception on error.
202 dn = ldb.Dn(self.samdb, "")
203 try:
204 res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
205 attrs=["dsServiceName"])
206 except ldb.LdbError, (enum, estr):
207 raise Exception("Unable to find my nTDSDSA - (%s)" % estr)
209 self.my_dsa_dnstr = res[0]["dsServiceName"][0]
210 self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)
212 def load_all_partitions(self):
213 """Discover all NCs thru the Partitions dn and
214 instantiate and load the NCs.
216 Each NC is inserted into the part_table by partition
217 dn string (not the nCName dn string)
219 ::returns: Raises an Exception on error
221 try:
222 res = self.samdb.search("CN=Partitions,%s" %
223 self.samdb.get_config_basedn(),
224 scope=ldb.SCOPE_SUBTREE,
225 expression="(objectClass=crossRef)")
226 except ldb.LdbError, (enum, estr):
227 raise Exception("Unable to find partitions - (%s)" % estr)
229 for msg in res:
230 partstr = str(msg.dn)
232 # already loaded
233 if partstr in self.part_table.keys():
234 continue
236 part = Partition(partstr)
238 part.load_partition(self.samdb)
239 self.part_table[partstr] = part
241 def should_be_present_test(self):
242 """Enumerate all loaded partitions and DSAs in local
243 site and test if NC should be present as replica
245 for partdn, part in self.part_table.items():
246 for dsadn, dsa in self.my_site.dsa_table.items():
247 needed, ro, partial = part.should_be_present(dsa)
248 logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
249 (dsadn, part.nc_dnstr, needed, ro, partial))
251 def refresh_failed_links_connections(self):
252 """Instead of NULL link with failure_count = 0, the tuple is simply removed"""
254 # LINKS: Refresh failed links
255 self.kcc_failed_links = {}
256 current, needed = self.my_dsa.get_rep_tables()
257 for replica in current.values():
258 # For every possible connection to replicate
259 for reps_from in replica.rep_repsFrom:
260 failure_count = reps_from.consecutive_sync_failures
261 if failure_count <= 0:
262 continue
264 dsa_guid = str(reps_from.source_dsa_obj_guid)
265 time_first_failure = reps_from.last_success
266 last_result = reps_from.last_attempt
267 dns_name = reps_from.dns_name1
269 f = self.kcc_failed_links.get(dsa_guid)
270 if not f:
271 f = KCCFailedObject(dsa_guid, failure_count,
272 time_first_failure, last_result,
273 dns_name)
274 self.kcc_failed_links[dsa_guid] = f
275 #elif f.failure_count == 0:
276 # f.failure_count = failure_count
277 # f.time_first_failure = time_first_failure
278 # f.last_result = last_result
279 else:
280 f.failure_count = max(f.failure_count, failure_count)
281 f.time_first_failure = min(f.time_first_failure, time_first_failure)
282 f.last_result = last_result
284 # CONNECTIONS: Refresh failed connections
285 restore_connections = set()
286 for connection in self.kcc_failed_connections:
287 try:
288 drs_utils.drsuapi_connect(connection.dns_name, lp, creds)
289 # Failed connection is no longer failing
290 restore_connections.add(connection)
291 except drs_utils.drsException:
292 # Failed connection still failing
293 connection.failure_count += 1
295 # Remove the restored connections from the failed connections
296 self.kcc_failed_connections.difference_update(restore_connections)
298 def is_stale_link_connection(self, target_dsa):
299 """Returns False if no tuple z exists in the kCCFailedLinks or
300 kCCFailedConnections variables such that z.UUIDDsa is the
301 objectGUID of the target dsa, z.FailureCount > 0, and
302 the current time - z.TimeFirstFailure > 2 hours.
304 # Returns True if tuple z exists...
305 failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
306 if failed_link:
307 # failure_count should be > 0, but check anyways
308 if failed_link.failure_count > 0:
309 unix_first_time_failure = nttime2unix(failed_link.time_first_failure)
310 # TODO guard against future
311 current_time = int(time.time())
312 if unix_first_time_failure > current_time:
313 logger.error("The last success time attribute for \
314 repsFrom is in the future!")
316 # Perform calculation in seconds
317 if (current_time - unix_first_time_failure) > 60 * 60 * 2:
318 return True
320 # TODO connections
322 return False
    # TODO: This should be backed by some form of local database
    def remove_unneeded_failed_links_connections(self):
        """Prune stale entries from the failure bookkeeping tables.

        Currently a no-op placeholder.
        """
        # Remove all tuples in kcc_failed_links where failure count = 0
        # In this implementation, this should never happen.

        # Remove all connections which were not used this run or connections
        # that became active during this run.
        pass
    def remove_unneeded_ntdsconn(self, all_connected):
        """Removes unneeded NTDS Connections after computation
        of KCC intra and inter-site topology has finished.

        :param all_connected: result of create_intersite_connections();
            inter-site connections are only pruned when it is True
        """
        mydsa = self.my_dsa

        # Loop thru connections
        for cn_dnstr, cn_conn in mydsa.connect_table.items():

            s_dnstr = cn_conn.get_from_dnstr()
            if s_dnstr is None:
                # No fromServer at all - connection is unusable
                cn_conn.to_be_deleted = True
                continue

            # Get the source DSA no matter what site
            s_dsa = self.get_dsa(s_dnstr)

            # Check if the DSA is in our site
            if self.my_site.same_site(s_dsa):
                same_site = True
            else:
                same_site = False

            # Given an nTDSConnection object cn, if the DC with the
            # nTDSDSA object dc that is the parent object of cn and
            # the DC with the nTDSDA object referenced by cn!fromServer
            # are in the same site, the KCC on dc deletes cn if all of
            # the following are true:
            #
            # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
            #
            # No site settings object s exists for the local DC's site, or
            # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
            # s!options.
            #
            # Another nTDSConnection object cn2 exists such that cn and
            # cn2 have the same parent object, cn!fromServer = cn2!fromServer,
            # and either
            #
            #     cn!whenCreated < cn2!whenCreated
            #
            #     cn!whenCreated = cn2!whenCreated and
            #     cn!objectGUID < cn2!objectGUID
            #
            # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
            if same_site:
                if not cn_conn.is_generated():
                    continue

                if self.my_site.is_cleanup_ntdsconn_disabled():
                    continue

                # Loop thru connections looking for a duplicate that
                # fulfills the previous criteria
                lesser = False

                for cn2_dnstr, cn2_conn in mydsa.connect_table.items():
                    if cn2_conn is cn_conn:
                        continue

                    s2_dnstr = cn2_conn.get_from_dnstr()
                    if s2_dnstr is None:
                        continue

                    # If the NTDS Connections has a different
                    # fromServer field then no match
                    if s2_dnstr != s_dnstr:
                        continue

                    # cn loses to cn2 if older, or same age but lower GUID
                    lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or
                              (cn_conn.whenCreated == cn2_conn.whenCreated and
                               cmp(cn_conn.guid, cn2_conn.guid) < 0))

                    if lesser:
                        break

                if lesser and not cn_conn.is_rodc_topology():
                    cn_conn.to_be_deleted = True

            # Given an nTDSConnection object cn, if the DC with the nTDSDSA
            # object dc that is the parent object of cn and the DC with
            # the nTDSDSA object referenced by cn!fromServer are in
            # different sites, a KCC acting as an ISTG in dc's site
            # deletes cn if all of the following are true:
            #
            # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
            #
            # cn!fromServer references an nTDSDSA object for a DC
            # in a site other than the local DC's site.
            #
            # The keepConnections sequence returned by
            # CreateIntersiteConnections() does not contain
            # cn!objectGUID, or cn is "superseded by" (see below)
            # another nTDSConnection cn2 and keepConnections
            # contains cn2!objectGUID.
            #
            # The return value of CreateIntersiteConnections()
            # was true.
            #
            # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
            # cn!options
            #
            else: # different site

                if not mydsa.is_istg():
                    continue

                if not cn_conn.is_generated():
                    continue

                # TODO
                # We are directly using this connection in intersite or
                # we are using a connection which can supersede this one.
                #
                # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
                # appear to be correct.
                #
                # 1. cn!fromServer and cn!parent appear inconsistent with no cn2
                # 2. The repsFrom do not imply each other
                #
                if self.keep_connection(cn_conn): # and not_superceded:
                    continue

                # This is the result of create_intersite_connections
                if not all_connected:
                    continue

                if not cn_conn.is_rodc_topology():
                    cn_conn.to_be_deleted = True

        if mydsa.is_ro() or opts.readonly:
            for dnstr, connect in mydsa.connect_table.items():
                if connect.to_be_deleted:
                    logger.info("TO BE DELETED:\n%s" % connect)
                if connect.to_be_added:
                    logger.info("TO BE ADDED:\n%s" % connect)

            # Perform deletion from our tables but perform
            # no database modification
            mydsa.commit_connections(self.samdb, ro=True)
        else:
            # Commit any modified connections
            mydsa.commit_connections(self.samdb)
478 def get_dsa_by_guidstr(self, guidstr):
479 """Given a DSA guid string, consule all sites looking
480 for the corresponding DSA and return it.
482 for site in self.site_table.values():
483 dsa = site.get_dsa_by_guidstr(guidstr)
484 if dsa is not None:
485 return dsa
486 return None
488 def get_dsa(self, dnstr):
489 """Given a DSA dn string, consule all sites looking
490 for the corresponding DSA and return it.
492 for site in self.site_table.values():
493 dsa = site.get_dsa(dnstr)
494 if dsa is not None:
495 return dsa
496 return None
    def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
        """Update t_repsFrom if necessary to satisfy requirements. Such
        updates are typically required when the IDL_DRSGetNCChanges
        server has moved from one site to another--for example, to
        enable compression when the server is moved from the
        client's site to another site.

        :param n_rep: NC replica we need
        :param t_repsFrom: repsFrom tuple to modify
        :param s_rep: NC replica at source DSA
        :param s_dsa: source DSA
        :param cn_conn: Local DSA NTDSConnection child

        ::returns: (update) bit field containing which portion of the
           repsFrom was modified.  This bit field is suitable as input
           to IDL_DRSReplicaModify ulModifyFields element, as it consists
           of these bits:
               drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
               drsuapi.DRSUAPI_DRS_UPDATE_FLAGS
               drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS
        """
        s_dnstr = s_dsa.dsa_dnstr
        # NOTE(review): update is initialized but never set or returned in
        # this version; the documented bit field is not yet propagated
        update = 0x0

        if self.my_site.same_site(s_dsa):
            same_site = True
        else:
            same_site = False

        times = cn_conn.convert_schedule_to_repltimes()

        # if schedule doesn't match then update and modify
        if times != t_repsFrom.schedule:
            t_repsFrom.schedule = times

        # Bit DRS_PER_SYNC is set in replicaFlags if and only
        # if nTDSConnection schedule has a value v that specifies
        # scheduled replication is to be performed at least once
        # per week.
        if cn_conn.is_schedule_minimum_once_per_week():

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC

        # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
        # if the source DSA and the local DC's nTDSDSA object are
        # in the same site or source dsa is the FSMO role owner
        # of one or more FSMO roles in the NC replica.
        if same_site or n_rep.is_fsmo_role_owner(s_dnstr):

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC

        # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
        # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
        # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
        # cn!options.  Otherwise, bit DRS_NEVER_NOTIFY is set in
        # t.replicaFlags if and only if s and the local DC's
        # nTDSDSA object are in different sites.
        if (cn_conn.options & dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0:

            if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:

                if (t_repsFrom.replica_flags &
                    drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0:
                    t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

        elif not same_site:

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

        # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
        # and only if s and the local DC's nTDSDSA object are
        # not in the same site and the
        # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
        # clear in cn!options
        if (not same_site and
            (cn_conn.options &
             dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION

        # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
        # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
        if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC

        # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
        # set in t.replicaFlags if and only if cn!enabledConnection = false.
        if not cn_conn.is_enabled():

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0:
                t_repsFrom.replica_flags |= \
                    drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0:
                t_repsFrom.replica_flags |= \
                    drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC

        # If s and the local DC's nTDSDSA object are in the same site,
        # cn!transportType has no value, or the RDN of cn!transportType
        # is CN=IP:
        #
        #     Bit DRS_MAIL_REP in t.replicaFlags is clear.
        #
        #     t.uuidTransport = NULL GUID.
        #
        #     t.uuidDsa = The GUID-based DNS name of s.
        #
        # Otherwise:
        #
        #     Bit DRS_MAIL_REP in t.replicaFlags is set.
        #
        #     If x is the object with dsname cn!transportType,
        #     t.uuidTransport = x!objectGUID.
        #
        #     Let a be the attribute identified by
        #     x!transportAddressAttribute. If a is
        #     the dNSHostName attribute, t.uuidDsa = the GUID-based
        #     DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
        #
        # It appears that the first statement i.e.
        #
        #     "If s and the local DC's nTDSDSA object are in the same
        #      site, cn!transportType has no value, or the RDN of
        #      cn!transportType is CN=IP:"
        #
        # could be a slightly tighter statement if it had an "or"
        # between each condition.  I believe this should
        # be interpreted as:
        #
        #     IF (same-site) OR (no-value) OR (type-ip)
        #
        # because IP should be the primary transport mechanism
        # (even in inter-site) and the absense of the transportType
        # attribute should always imply IP no matter if its multi-site
        #
        # NOTE MS-TECH INCORRECT:
        #
        # All indications point to these statements above being
        # incorrectly stated:
        #
        #     t.uuidDsa = The GUID-based DNS name of s.
        #
        #     Let a be the attribute identified by
        #     x!transportAddressAttribute. If a is
        #     the dNSHostName attribute, t.uuidDsa = the GUID-based
        #     DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
        #
        # because the uuidDSA is a GUID and not a GUID-base DNS
        # name.  Nor can uuidDsa hold (s!parent)!a if not
        # dNSHostName.  What should have been said is:
        #
        #     t.naDsa = The GUID-based DNS name of s
        #
        # That would also be correct if transportAddressAttribute
        # were "mailAddress" because (naDsa) can also correctly
        # hold the SMTP ISM service address.
        #
        nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())

        # We're not currently supporting SMTP replication
        # so is_smtp_replication_available() is currently
        # always returning False
        if (same_site or
            cn_conn.transport_dnstr is None or
            cn_conn.transport_dnstr.find("CN=IP") == 0 or
            not is_smtp_replication_available()):

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0:
                t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP

            null_guid = misc.GUID()
            if (t_repsFrom.transport_guid is None or
                t_repsFrom.transport_guid != null_guid):
                t_repsFrom.transport_guid = null_guid

            # See (NOTE MS-TECH INCORRECT) above
            if t_repsFrom.version == 0x1:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name1 != nastr:
                    t_repsFrom.dns_name1 = nastr
            else:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name2 is None or \
                   t_repsFrom.dns_name1 != nastr or \
                   t_repsFrom.dns_name2 != nastr:
                    t_repsFrom.dns_name1 = nastr
                    t_repsFrom.dns_name2 = nastr

        else:
            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP

            # We have a transport type but its not an
            # object in the database
            if cn_conn.transport_dnstr not in self.transport_table.keys():
                raise Exception("Missing inter-site transport - (%s)" %
                                cn_conn.transport_dnstr)

            x_transport = self.transport_table[cn_conn.transport_dnstr]

            if t_repsFrom.transport_guid != x_transport.guid:
                t_repsFrom.transport_guid = x_transport.guid

            # See (NOTE MS-TECH INCORRECT) above
            if x_transport.address_attr == "dNSHostName":

                if t_repsFrom.version == 0x1:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name1 != nastr:
                        t_repsFrom.dns_name1 = nastr
                else:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name2 is None or \
                       t_repsFrom.dns_name1 != nastr or \
                       t_repsFrom.dns_name2 != nastr:
                        t_repsFrom.dns_name1 = nastr
                        t_repsFrom.dns_name2 = nastr

            else:
                # MS tech specification says we retrieve the named
                # attribute in "transportAddressAttribute" from the parent of
                # the DSA object
                try:
                    pdnstr = s_dsa.get_parent_dnstr()
                    attrs = [ x_transport.address_attr ]

                    res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
                                            attrs=attrs)
                except ldb.LdbError, (enum, estr):
                    raise Exception(
                        "Unable to find attr (%s) for (%s) - (%s)" %
                        (x_transport.address_attr, pdnstr, estr))

                msg = res[0]
                nastr = str(msg[x_transport.address_attr][0])

                # See (NOTE MS-TECH INCORRECT) above
                if t_repsFrom.version == 0x1:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name1 != nastr:
                        t_repsFrom.dns_name1 = nastr
                else:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name2 is None or \
                       t_repsFrom.dns_name1 != nastr or \
                       t_repsFrom.dns_name2 != nastr:

                        t_repsFrom.dns_name1 = nastr
                        t_repsFrom.dns_name2 = nastr

        if t_repsFrom.is_modified():
            logger.debug("modify_repsFrom(): %s" % t_repsFrom)
    def is_repsFrom_implied(self, n_rep, cn_conn):
        """Given a NC replica and NTDS Connection, determine if the connection
        implies a repsFrom tuple should be present from the source DSA listed
        in the connection to the naming context

        :param n_rep: NC replica
        :param cn_conn: NTDS Connection
        ::returns (True || False), source DSA:
        """
        # NTDS Connection must satisfy all the following criteria
        # to imply a repsFrom tuple is needed:
        #
        #    cn!enabledConnection = true.
        #    cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
        #    cn!fromServer references an nTDSDSA object.
        s_dsa = None

        if cn_conn.is_enabled() and not cn_conn.is_rodc_topology():

            s_dnstr = cn_conn.get_from_dnstr()
            if s_dnstr is not None:
                s_dsa = self.get_dsa(s_dnstr)

        # No DSA matching this source DN string?
        if s_dsa is None:
            return False, None

        # To imply a repsFrom tuple is needed, each of these
        # must be True:
        #
        #     An NC replica of the NC "is present" on the DC to
        #     which the nTDSDSA object referenced by cn!fromServer
        #     corresponds.
        #
        #     An NC replica of the NC "should be present" on
        #     the local DC
        s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

        if s_rep is None or not s_rep.is_present():
            return False, None

        # To imply a repsFrom tuple is needed, each of these
        # must be True:
        #
        #     The NC replica on the DC referenced by cn!fromServer is
        #     a writable replica or the NC replica that "should be
        #     present" on the local DC is a partial replica.
        #
        #     The NC is not a domain NC, the NC replica that
        #     "should be present" on the local DC is a partial
        #     replica, cn!transportType has no value, or
        #     cn!transportType has an RDN of CN=IP.
        implied = (not s_rep.is_ro() or n_rep.is_partial()) and \
                  (not n_rep.is_domain() or
                   n_rep.is_partial() or
                   cn_conn.transport_dnstr is None or
                   cn_conn.transport_dnstr.find("CN=IP") == 0)

        if implied:
            return True, s_dsa
        else:
            return False, None
    def translate_ntdsconn(self):
        """This function adjusts values of repsFrom abstract attributes of NC
        replicas on the local DC to match those implied by
        nTDSConnection objects.
        """
        logger.debug("translate_ntdsconn(): enter")

        if self.my_dsa.is_translate_ntdsconn_disabled():
            return

        current_rep_table, needed_rep_table = self.my_dsa.get_rep_tables()

        # Filled in with replicas we currently have that need deleting.
        # NOTE(review): delete_rep_table is populated below but not
        # consumed within this method - confirm intended
        delete_rep_table = {}

        # We're using the MS notation names here to allow
        # correlation back to the published algorithm.
        #
        # n_rep      - NC replica (n)
        # t_repsFrom - tuple (t) in n!repsFrom
        # s_dsa      - Source DSA of the replica.  Defined as nTDSDSA
        #              object (s) such that (s!objectGUID = t.uuidDsa)
        #              In our IDL representation of repsFrom the (uuidDsa)
        #              attribute is called (source_dsa_obj_guid)
        # cn_conn    - (cn) is nTDSConnection object and child of the local
        #              DC's nTDSDSA object and (cn!fromServer = s)
        # s_rep      - source DSA replica of n
        #
        # If we have the replica and its not needed
        # then we add it to the "to be deleted" list.
        for dnstr, n_rep in current_rep_table.items():
            if dnstr not in needed_rep_table.keys():
                delete_rep_table[dnstr] = n_rep

        # Now perform the scan of replicas we'll need
        # and compare any current repsFrom against the
        # connections
        for dnstr, n_rep in needed_rep_table.items():

            # load any repsFrom and fsmo roles as we'll
            # need them during connection translation
            n_rep.load_repsFrom(self.samdb)
            n_rep.load_fsmo_roles(self.samdb)

            # Loop thru the existing repsFrom tupples (if any)
            for i, t_repsFrom in enumerate(n_rep.rep_repsFrom):

                # for each tuple t in n!repsFrom, let s be the nTDSDSA
                # object such that s!objectGUID = t.uuidDsa
                guidstr = str(t_repsFrom.source_dsa_obj_guid)
                s_dsa = self.get_dsa_by_guidstr(guidstr)

                # Source dsa is gone from config (strange)
                # so cleanup stale repsFrom for unlisted DSA
                if s_dsa is None:
                    logger.debug("repsFrom source DSA guid (%s) not found" %
                                 guidstr)
                    t_repsFrom.to_be_deleted = True
                    continue

                s_dnstr = s_dsa.dsa_dnstr

                # Retrieve my DSAs connection object (if it exists)
                # that specifies the fromServer equivalent to
                # the DSA that is specified in the repsFrom source
                cn_conn = self.my_dsa.get_connection_by_from_dnstr(s_dnstr)

                # Let (cn) be the nTDSConnection object such that (cn)
                # is a child of the local DC's nTDSDSA object and
                # (cn!fromServer = s) and (cn!options) does not contain
                # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.
                if cn_conn and cn_conn.is_rodc_topology():
                    cn_conn = None

                # KCC removes this repsFrom tuple if any of the following
                # is true:
                #     cn = NULL.
                #
                #     No NC replica of the NC "is present" on DSA that
                #     would be source of replica
                #
                #     A writable replica of the NC "should be present" on
                #     the local DC, but a partial replica "is present" on
                #     the source DSA
                s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

                if cn_conn is None or \
                   s_rep is None or not s_rep.is_present() or \
                   (not n_rep.is_ro() and s_rep.is_partial()):

                    t_repsFrom.to_be_deleted = True
                    continue

                # If the KCC did not remove t from n!repsFrom, it updates t
                self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

            # Loop thru connections and add implied repsFrom tuples
            # for each NTDSConnection under our local DSA if the
            # repsFrom is not already present
            for cn_dnstr, cn_conn in self.my_dsa.connect_table.items():

                implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn)
                if not implied:
                    continue

                # Loop thru the existing repsFrom tupples (if any) and
                # if we already have a tuple for this connection then
                # no need to proceed to add.  It will have been changed
                # to have the correct attributes above
                for i, t_repsFrom in enumerate(n_rep.rep_repsFrom):

                    guidstr = str(t_repsFrom.source_dsa_obj_guid)
                    if s_dsa is self.get_dsa_by_guidstr(guidstr):
                        # s_dsa = None marks "already covered" below
                        s_dsa = None
                        break

                if s_dsa is None:
                    continue

                # Create a new RepsFromTo and proceed to modify
                # it according to specification
                t_repsFrom = RepsFromTo(n_rep.nc_dnstr)

                t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid

                s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

                self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

                # Add to our NC repsFrom as this is newly computed
                if t_repsFrom.is_modified():
                    n_rep.rep_repsFrom.append(t_repsFrom)

            if opts.readonly:
                # Display any to be deleted or modified repsFrom
                text = n_rep.dumpstr_to_be_deleted()
                if text:
                    logger.info("TO BE DELETED:\n%s" % text)
                text = n_rep.dumpstr_to_be_modified()
                if text:
                    logger.info("TO BE MODIFIED:\n%s" % text)

                # Perform deletion from our tables but perform
                # no database modification
                n_rep.commit_repsFrom(self.samdb, ro=True)
            else:
                # Commit any modified repsFrom to the NC replica
                n_rep.commit_repsFrom(self.samdb)
979 def keep_connection(self, cn_conn):
980 """Determines if the connection is meant to be kept during the
981 pruning of unneeded connections operation.
983 Consults the keep_connection_list[] which was built during
984 intersite NC replica graph computation.
986 ::returns (True or False): if (True) connection should not be pruned
988 if cn_conn in self.keep_connection_list:
989 return True
990 return False
    def merge_failed_links(self):
        """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.

        The KCC on a writable DC attempts to merge the link and connection
        failure information from bridgehead DCs in its own site to help it
        identify failed bridgehead DCs.
        """
        # MS-TECH Ref 6.2.2.3.2 Merge of kCCFailedLinks and kCCFailedLinks
        # from Bridgeheads
        #
        # 1. Queries every bridgehead server in your site (other than yourself)
        # 2. For every ntDSConnection that references a server in a different
        #    site merge all the failure info
        #
        # XXX - not implemented yet
1007 def setup_graph(self, part):
1008 """Set up a GRAPH, populated with a VERTEX for each site
1009 object, a MULTIEDGE for each siteLink object, and a
1010 MUTLIEDGESET for each siteLinkBridge object (or implied
1011 siteLinkBridge).
1013 ::returns: a new graph
1015 dn_to_vertex = {}
1016 # Create graph
1017 g = IntersiteGraph()
1018 # Add vertices
1019 for site_dn, site in self.site_table.items():
1020 vertex = Vertex(site, part)
1021 vertex.guid = site_dn
1022 g.vertices.add(vertex)
1024 if not dn_to_vertex.get(site_dn):
1025 dn_to_vertex[site_dn] = []
1027 dn_to_vertex[site_dn].append(vertex)
1029 connected_vertices = set()
1030 for transport_dn, transport in self.transport_table.items():
1031 # Currently only ever "IP"
1032 for site_link_dn, site_link in self.sitelink_table.items():
1033 new_edge = create_edge(transport_dn, site_link, dn_to_vertex)
1034 connected_vertices.update(new_edge.vertices)
1035 g.edges.add(new_edge)
1037 # If 'Bridge all site links' is enabled and Win2k3 bridges required is not set
1038 # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
1039 # No documentation for this however, ntdsapi.h appears to have listed:
1040 # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
1041 if ((self.my_site.site_options & 0x00000002) == 0
1042 and (self.my_site.site_options & 0x00001000) == 0):
1043 g.edge_set.add(create_auto_edge_set(g, transport_dn))
1044 else:
1045 # TODO get all site link bridges
1046 for site_link_bridge in []:
1047 g.edge_set.add(create_edge_set(g, transport_dn,
1048 site_link_bridge))
1050 g.connected_vertices = connected_vertices
1052 return g
1054 def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
1055 """Get a bridghead DC.
1057 :param site: site object representing for which a bridgehead
1058 DC is desired.
1059 :param part: crossRef for NC to replicate.
1060 :param transport: interSiteTransport object for replication
1061 traffic.
1062 :param partial_ok: True if a DC containing a partial
1063 replica or a full replica will suffice, False if only
1064 a full replica will suffice.
1065 :param detect_failed: True to detect failed DCs and route
1066 replication traffic around them, False to assume no DC
1067 has failed.
1068 ::returns: dsa object for the bridgehead DC or None
1071 bhs = self.get_all_bridgeheads(site, part, transport,
1072 partial_ok, detect_failed)
1073 if len(bhs) == 0:
1074 logger.debug("get_bridgehead: exit\n\tsitedn=%s\n\tbhdn=None" %
1075 site.site_dnstr)
1076 return None
1077 else:
1078 logger.debug("get_bridgehead: exit\n\tsitedn=%s\n\tbhdn=%s" %
1079 (site.site_dnstr, bhs[0].dsa_dnstr))
1080 return bhs[0]
    def get_all_bridgeheads(self, site, part, transport,
                            partial_ok, detect_failed):
        """Get all bridghead DCs satisfying the given criteria

        :param site: site object representing the site for which
            bridgehead DCs are desired.
        :param part: partition for NC to replicate.
        :param transport: interSiteTransport object for
            replication traffic.
        :param partial_ok: True if a DC containing a partial
            replica or a full replica will suffice, False if
            only a full replica will suffice.
        :param detect_failed: True to detect failed DCs and route
            replication traffic around them, False to assume
            no DC has failed.
        ::returns: list of dsa objects for available bridgehead
            DCs (may be empty)
        """
        bhs = []

        logger.debug("get_all_bridgeheads: %s" % transport)

        for key, dsa in site.dsa_table.items():

            pdnstr = dsa.get_parent_dnstr()

            # IF t!bridgeheadServerListBL has one or more values and
            # t!bridgeheadServerListBL does not contain a reference
            # to the parent object of dc then skip dc
            if (len(transport.bridgehead_list) != 0 and
                pdnstr not in transport.bridgehead_list):
                continue

            # IF dc is in the same site as the local DC
            #     IF a replica of cr!nCName is not in the set of NC replicas
            #     that "should be present" on dc or a partial replica of the
            #     NC "should be present" but partialReplicasOkay = FALSE
            #         Skip dc
            if self.my_site.same_site(dsa):
                needed, ro, partial = part.should_be_present(dsa)
                if not needed or (partial and not partial_ok):
                    continue

            # ELSE
            #     IF an NC replica of cr!nCName is not in the set of NC
            #     replicas that "are present" on dc or a partial replica of
            #     the NC "is present" but partialReplicasOkay = FALSE
            #         Skip dc
            else:
                rep = dsa.get_current_replica(part.nc_dnstr)
                if rep is None or (rep.is_partial() and not partial_ok):
                    continue

            # IF AmIRODC() and cr!nCName corresponds to default NC then
            #     Let dsaobj be the nTDSDSA object of the dc
            #     IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
            #         Skip dc
            if self.my_dsa.is_ro() and part.is_default():
                if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
                    continue

            # IF t!name != "IP" and the parent object of dc has no value for
            # the attribute specified by t!transportAddressAttribute
            #     Skip dc
            if transport.name != "IP":
                # MS tech specification says we retrieve the named
                # attribute in "transportAddressAttribute" from the parent
                # of the DSA object
                try:
                    attrs = [ transport.address_attr ]

                    res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
                                            attrs=attrs)
                except ldb.LdbError, (enum, estr):
                    # Parent object could not be read; treat it as having
                    # no transport address and skip this DC
                    continue

                msg = res[0]
                if transport.address_attr not in msg:
                    continue

                # NOTE(review): nastr is computed but never used below;
                # presumably retained for forthcoming non-IP transport
                # handling -- confirm before removing.
                nastr = str(msg[transport.address_attr][0])

            # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
            #     Skip dc
            if self.is_bridgehead_failed(dsa, detect_failed):
                continue

            logger.debug("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr)
            bhs.append(dsa)

        # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
        # s!options
        #     SORT bhs such that all GC servers precede DCs that are not GC
        #     servers, and otherwise by ascending objectGUID
        # ELSE
        #     SORT bhs in a random order
        if site.is_random_bridgehead_disabled():
            bhs.sort(sort_dsa_by_gc_and_guid)
        else:
            random.shuffle(bhs)

        return bhs
1187 def is_bridgehead_failed(self, dsa, detect_failed):
1188 """Determine whether a given DC is known to be in a failed state
1189 ::returns: True if and only if the DC should be considered failed
1191 # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
1192 # When DETECT_STALE_DISABLED, we can never know of if it's in a failed state
1193 if self.my_site.site_options & 0x00000008:
1194 return False
1195 elif self.is_stale_link_connection(dsa):
1196 return True
1198 return detect_failed
1200 def create_connection(self, part, rbh, rsite, transport,
1201 lbh, lsite, link_opt, link_sched,
1202 partial_ok, detect_failed):
1203 """Create an nTDSConnection object with the given parameters
1204 if one does not already exist.
1206 :param part: crossRef object for the NC to replicate.
1207 :param rbh: nTDSDSA object for DC to act as the
1208 IDL_DRSGetNCChanges server (which is in a site other
1209 than the local DC's site).
1210 :param rsite: site of the rbh
1211 :param transport: interSiteTransport object for the transport
1212 to use for replication traffic.
1213 :param lbh: nTDSDSA object for DC to act as the
1214 IDL_DRSGetNCChanges client (which is in the local DC's site).
1215 :param lsite: site of the lbh
1216 :param link_opt: Replication parameters (aggregated siteLink options, etc.)
1217 :param link_sched: Schedule specifying the times at which
1218 to begin replicating.
1219 :partial_ok: True if bridgehead DCs containing partial
1220 replicas of the NC are acceptable.
1221 :param detect_failed: True to detect failed DCs and route
1222 replication traffic around them, FALSE to assume no DC
1223 has failed.
1225 rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
1226 partial_ok, False)
1228 # MS-TECH says to compute rbhs_avail but then doesn't use it
1229 # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
1230 # partial_ok, detect_failed)
1232 lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
1233 partial_ok, False)
1235 # MS-TECH says to compute lbhs_avail but then doesn't use it
1236 # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
1237 # partial_ok, detect_failed)
1239 # FOR each nTDSConnection object cn such that the parent of cn is
1240 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1241 for ldsa in lbhs_all:
1242 for cn in ldsa.connect_table.values():
1244 rdsa = None
1245 for rdsa in rbhs_all:
1246 if cn.from_dnstr == rdsa.dsa_dnstr:
1247 break
1249 if rdsa is None:
1250 continue
1252 # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
1253 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
1254 # cn!transportType references t
1255 if (cn.is_generated() and not cn.is_rodc_topology() and
1256 cn.transport_dnstr == transport.dnstr):
1258 # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
1259 # cn!options and cn!schedule != sch
1260 # Perform an originating update to set cn!schedule to
1261 # sched
1262 if (not cn.is_user_owned_schedule() and
1263 not cn.is_equivalent_schedule(link_sched)):
1264 cn.schedule = link_sched
1265 cn.set_modified(True)
1267 # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1268 # NTDSCONN_OPT_USE_NOTIFY are set in cn
1269 if cn.is_override_notify_default() and \
1270 cn.is_use_notify():
1272 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
1273 # ri.Options
1274 # Perform an originating update to clear bits
1275 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1276 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1277 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
1278 cn.options &= \
1279 ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1280 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1281 cn.set_modified(True)
1283 # ELSE
1284 else:
1286 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
1287 # ri.Options
1288 # Perform an originating update to set bits
1289 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1290 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1291 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1292 cn.options |= \
1293 (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1294 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1295 cn.set_modified(True)
1298 # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
1299 if cn.is_twoway_sync():
1301 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
1302 # ri.Options
1303 # Perform an originating update to clear bit
1304 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1305 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
1306 cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1307 cn.set_modified(True)
1309 # ELSE
1310 else:
1312 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
1313 # ri.Options
1314 # Perform an originating update to set bit
1315 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1316 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1317 cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1318 cn.set_modified(True)
1321 # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
1322 # in cn!options
1323 if cn.is_intersite_compression_disabled():
1325 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
1326 # in ri.Options
1327 # Perform an originating update to clear bit
1328 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1329 # cn!options
1330 if (link_opt &
1331 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0:
1332 cn.options &= \
1333 ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1334 cn.set_modified(True)
1336 # ELSE
1337 else:
1338 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1339 # ri.Options
1340 # Perform an originating update to set bit
1341 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1342 # cn!options
1343 if (link_opt &
1344 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0:
1345 cn.options |= \
1346 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1347 cn.set_modified(True)
1349 # Display any modified connection
1350 if opts.readonly:
1351 if cn.to_be_modified:
1352 logger.info("TO BE MODIFIED:\n%s" % cn)
1354 ldsa.commit_connections(self.samdb, ro=True)
1355 else:
1356 ldsa.commit_connections(self.samdb)
1357 # ENDFOR
1359 valid_connections = 0
1361 # FOR each nTDSConnection object cn such that cn!parent is
1362 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1363 for ldsa in lbhs_all:
1364 for cn in ldsa.connect_table.values():
1366 rdsa = None
1367 for rdsa in rbhs_all:
1368 if cn.from_dnstr == rdsa.dsa_dnstr:
1369 break
1371 if rdsa is None:
1372 continue
1374 # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
1375 # cn!transportType references t) and
1376 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
1377 if ((not cn.is_generated() or
1378 cn.transport_dnstr == transport.dnstr) and
1379 not cn.is_rodc_topology()):
1381 # LET rguid be the objectGUID of the nTDSDSA object
1382 # referenced by cn!fromServer
1383 # LET lguid be (cn!parent)!objectGUID
1385 # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
1386 # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
1387 # Increment cValidConnections by 1
1388 if (not self.is_bridgehead_failed(rdsa, detect_failed) and
1389 not self.is_bridgehead_failed(ldsa, detect_failed)):
1390 valid_connections += 1
1392 # IF keepConnections does not contain cn!objectGUID
1393 # APPEND cn!objectGUID to keepConnections
1394 if not self.keep_connection(cn):
1395 self.keep_connection_list.append(cn)
1397 # ENDFOR
1399 # IF cValidConnections = 0
1400 if valid_connections == 0:
1402 # LET opt be NTDSCONN_OPT_IS_GENERATED
1403 opt = dsdb.NTDSCONN_OPT_IS_GENERATED
1405 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
1406 # SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1407 # NTDSCONN_OPT_USE_NOTIFY in opt
1408 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1409 opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1410 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1412 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
1413 # SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
1414 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1415 opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1417 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1418 # ri.Options
1419 # SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
1420 if (link_opt &
1421 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0:
1422 opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1424 # Perform an originating update to create a new nTDSConnection
1425 # object cn that is a child of lbh, cn!enabledConnection = TRUE,
1426 # cn!options = opt, cn!transportType is a reference to t,
1427 # cn!fromServer is a reference to rbh, and cn!schedule = sch
1428 cn = lbh.new_connection(opt, 0, transport, lbh.dsa_dnstr, link_sched)
1430 # Display any added connection
1431 if opts.readonly:
1432 if cn.to_be_added:
1433 logger.info("TO BE ADDED:\n%s" % cn)
1435 lbh.commit_connections(self.samdb, ro=True)
1436 else:
1437 lbh.commit_connections(self.samdb)
1439 # APPEND cn!objectGUID to keepConnections
1440 if not self.keep_connection(cn):
1441 self.keep_connection_list.append(cn)
1443 def add_transports(self, vertex, local_vertex, graph, detect_failed):
1444 vertex.accept_red_red = []
1445 vertex.accept_black = []
1446 found_failed = False
1447 for t_guid, transport in self.transport_table.items():
1448 # FLAG_CR_NTDS_DOMAIN 0x00000002
1449 if (local_vertex.is_red() and transport != "IP" and
1450 vertex.part.system_flags & 0x00000002):
1451 continue
1453 if vertex in graph.connected_vertices:
1454 continue
1456 partial_replica_okay = vertex.is_black()
1458 bh = self.get_bridgehead(local_vertex.site, vertex.part, transport,
1459 partial_replica_okay, detect_failed)
1460 if bh is None:
1461 found_failed = True
1462 continue
1464 vertex.accept_red_red.append(t_guid) # TODO should be guid
1465 vertex.accept_black.append(t_guid) # TODO should be guid
1467 # Add additional transport to allow another run of Dijkstra
1468 vertex.accept_red_red.append("EDGE_TYPE_ALL")
1469 vertex.accept_black.append("EDGE_TYPE_ALL")
1471 return found_failed
    def create_connections(self, graph, part, detect_failed):
        """Construct an NC replica graph for the NC identified by
        the given crossRef, then create any additional nTDSConnection
        objects required.

        :param graph: site graph.
        :param part: crossRef object for NC.
        :param detect_failed: True to detect failed DCs and route
            replication traffic around them, False to assume no DC
            has failed.

        Modifies self.keep_connection_list by adding any connections
        deemed to be "in use".

        ::returns: (all_connected, found_failed_dc)
            (all_connected) True if the resulting NC replica graph
                connects all sites that need to be connected.
            (found_failed_dc) True if one or more failed DCs were
                detected.
        """
        all_connected = True
        found_failed = False

        logger.debug("create_connections(): enter\n\tpartdn=%s\n\tdetect_failed=%s" %
                     (part.nc_dnstr, detect_failed))

        # XXX - This is a highly abbreviated function from the MS-TECH
        #       ref.  It creates connections between bridgeheads to all
        #       sites that have appropriate replicas.  Thus we are not
        #       creating a minimum cost spanning tree but instead
        #       producing a fully connected tree.  This should produce
        #       a full (albeit not optimal cost) replication topology.
        my_vertex = Vertex(self.my_site, part)
        my_vertex.color_vertex()

        for v in graph.vertices:
            v.color_vertex()
            # NOTE(review): the found-failed result of add_transports()
            # is discarded here, so found_failed below can never become
            # True -- confirm whether it should be captured.
            self.add_transports(v, my_vertex, graph, detect_failed)

        # No NC replicas for this NC in the site of the local DC,
        # so no nTDSConnection objects need be created
        if my_vertex.is_white():
            return all_connected, found_failed

        edge_list, component_count = self.get_spanning_tree_edges(graph)

        # More than one component means some sites are unreachable
        if component_count > 1:
            all_connected = False

        # LET partialReplicaOkay be TRUE if and only if
        # localSiteVertex.Color = COLOR.BLACK
        if my_vertex.is_black():
            partial_ok = True
        else:
            partial_ok = False

        # Utilize the IP transport only for now
        # NOTE(review): if the table is non-empty but has no "IP" entry,
        # transport is left bound to the last table value rather than
        # None, so the check below only fires for an empty table.
        transport = None
        for transport in self.transport_table.values():
            if transport.name == "IP":
                break

        if transport is None:
            raise Exception("Unable to find inter-site transport for IP")

        for e in edge_list:
            if e.directed and e.vertices[0].site is self.my_site: # more accurate comparison?
                continue

            # Pick the endpoint in the remote site
            if e.vertices[0].site is self.my_site:
                rsite = e.vertices[1]
            else:
                rsite = e.vertices[0]

            # We don't make connections to our own site as that
            # is intrasite topology generator's job
            # NOTE(review): rsite is a Vertex while self.my_site is a
            # site object, so this identity test can never be True;
            # presumably rsite.site was intended -- confirm.
            if rsite is self.my_site:
                continue

            # Determine bridgehead server in remote site
            # NOTE(review): get_bridgehead() documents its first argument
            # as a site object but receives a Vertex here -- verify the
            # attributes used downstream.
            rbh = self.get_bridgehead(rsite, part, transport,
                                      partial_ok, detect_failed)

            # RODC acts as an BH for itself
            # IF AmIRODC() then
            #     LET lbh be the nTDSDSA object of the local DC
            # ELSE
            #     LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
            #     cr, t, partialReplicaOkay, detectFailedDCs)
            if self.my_dsa.is_ro():
                lsite = self.my_site
                lbh = self.my_dsa
            else:
                lsite = self.my_site
                lbh = self.get_bridgehead(lsite, part, transport,
                                          partial_ok, detect_failed)

            # Find the siteLink object that enumerates the connection
            # between the two sites if it is present
            sitelink = self.get_sitelink(lsite.site_dnstr, rsite.site_dnstr)
            if sitelink is None:
                link_opt = 0x0
                link_sched = None
            else:
                link_opt = sitelink.options
                link_sched = sitelink.schedule

            self.create_connection(part, rbh, rsite, transport,
                                   lbh, lsite, link_opt, link_sched,
                                   partial_ok, detect_failed)

        return all_connected, found_failed
1587 def create_intersite_connections(self):
1588 """Computes an NC replica graph for each NC replica that "should be
1589 present" on the local DC or "is present" on any DC in the same site
1590 as the local DC. For each edge directed to an NC replica on such a
1591 DC from an NC replica on a DC in another site, the KCC creates an
1592 nTDSConnection object to imply that edge if one does not already
1593 exist.
1595 Modifies self.keep_connection_list - A list of nTDSConnection
1596 objects for edges that are directed
1597 to the local DC's site in one or more NC replica graphs.
1599 returns: True if spanning trees were created for all NC replica
1600 graphs, otherwise False.
1602 all_connected = True
1603 self.keep_connection_list = []
1605 # LET crossRefList be the set containing each object o of class
1606 # crossRef such that o is a child of the CN=Partitions child of the
1607 # config NC
1609 # FOR each crossRef object cr in crossRefList
1610 # IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
1611 # is clear in cr!systemFlags, skip cr.
1612 # LET g be the GRAPH return of SetupGraph()
1614 for part in self.part_table.values():
1616 if not part.is_enabled():
1617 continue
1619 if part.is_foreign():
1620 continue
1622 graph = self.setup_graph(part)
1624 # Create nTDSConnection objects, routing replication traffic
1625 # around "failed" DCs.
1626 found_failed = False
1628 connected, found_failed = self.create_connections(graph, part, True)
1630 if not connected:
1631 all_connected = False
1633 if found_failed:
1634 # One or more failed DCs preclude use of the ideal NC
1635 # replica graph. Add connections for the ideal graph.
1636 self.create_connections(graph, part, False)
1638 return all_connected
    def get_spanning_tree_edges(self, graph):
        """Compute the spanning-tree edge list for the intersite graph.

        :param graph: IntersiteGraph whose vertices/edge sets were built
            by setup_graph()
        ::returns: (edge_list, component_count) where edge_list is the
            filtered output-edge list (see copy_output_edges) and
            component_count is the number of connected components
        """
        # Phase 1: Run Dijkstra's to get a list of internal edges, which are
        # just the shortest-paths connecting colored vertices

        internal_edges = set()

        for e_set in graph.edge_set:
            edgeType = None
            # Reset per-vertex adjacency for this edge set
            for v in graph.vertices:
                v.edges = []

            # All con_type in an edge set is the same
            # NOTE(review): if e_set.edges is empty, edgeType stays None
            # when passed to dijkstra() -- confirm that is handled there.
            for e in e_set.edges:
                edgeType = e.con_type
                for v in e.vertices:
                    v.edges.append(e)

            # Run dijkstra's algorithm with just the red vertices as seeds
            # Seed from the full replicas
            dijkstra(graph, edgeType, False)

            # Process edge set
            process_edge_set(graph, e_set, internal_edges)

            # Run dijkstra's algorithm with red and black vertices as the seeds
            # Seed from both full and partial replicas
            dijkstra(graph, edgeType, True)

            # Process edge set
            process_edge_set(graph, e_set, internal_edges)

        # All vertices have root/component as itself
        setup_vertices(graph)
        process_edge_set(graph, None, internal_edges)

        # Phase 2: Run Kruskal's on the internal edges
        output_edges, components = kruskal(graph, internal_edges)

        # This recalculates the cost for the path connecting the closest red vertex
        # Ignoring types is fine because NO suboptimal edge should exist in the graph
        dijkstra(graph, "EDGE_TYPE_ALL", False) # TODO rename
        # Phase 3: Process the output
        for v in graph.vertices:
            if v.is_red():
                # A red (writable-replica) vertex is its own closest red
                v.dist_to_red = 0
            else:
                # Cost produced by the final dijkstra() run above
                v.dist_to_red = v.repl_info.cost

        # count the components
        return self.copy_output_edges(graph, output_edges), components
1691 # This ensures only one-way connections for partial-replicas
1692 def copy_output_edges(self, graph, output_edges):
1693 edge_list = []
1694 vid = self.my_site # object guid for the local dc's site
1696 for edge in output_edges:
1697 # Three-way edges are no problem here since these were created by
1698 # add_out_edge which only has two endpoints
1699 v = edge.vertices[0]
1700 w = edge.vertices[1]
1701 if v.site is vid or w.site is vid:
1702 if (v.is_black() or w.is_black()) and not v.dist_to_red == 2 ** 32 - 1:
1703 edge.directed = True
1705 if w.dist_to_red < v.dist_to_red:
1706 edge.vertices[0] = w
1707 edge.vertices[1] = v
1709 edge_list.append(edge)
1711 return edge_list
1713 def intersite(self):
1714 """The head method for generating the inter-site KCC replica
1715 connection graph and attendant nTDSConnection objects
1716 in the samdb.
1718 Produces self.keep_connection_list[] of NTDS Connections
1719 that should be kept during subsequent pruning process.
1721 ::return (True or False): (True) if the produced NC replica
1722 graph connects all sites that need to be connected
1725 # Retrieve my DSA
1726 mydsa = self.my_dsa
1727 mysite = self.my_site
1728 all_connected = True
1730 logger.debug("intersite(): enter")
1732 # Determine who is the ISTG
1733 if opts.readonly:
1734 mysite.select_istg(self.samdb, mydsa, ro=True)
1735 else:
1736 mysite.select_istg(self.samdb, mydsa, ro=False)
1738 # Test whether local site has topology disabled
1739 if mysite.is_intersite_topology_disabled():
1740 logger.debug("intersite(): exit disabled all_connected=%d" %
1741 all_connected)
1742 return all_connected
1744 if not mydsa.is_istg():
1745 logger.debug("intersite(): exit not istg all_connected=%d" %
1746 all_connected)
1747 return all_connected
1749 self.merge_failed_links()
1751 # For each NC with an NC replica that "should be present" on the
1752 # local DC or "is present" on any DC in the same site as the
1753 # local DC, the KCC constructs a site graph--a precursor to an NC
1754 # replica graph. The site connectivity for a site graph is defined
1755 # by objects of class interSiteTransport, siteLink, and
1756 # siteLinkBridge in the config NC.
1758 all_connected = self.create_intersite_connections()
1760 logger.debug("intersite(): exit all_connected=%d" % all_connected)
1761 return all_connected
    def update_rodc_connection(self):
        """Runs when the local DC is an RODC and updates the RODC NTFRS
        connection object.
        """
        # Given an nTDSConnection object cn1, such that cn1.options contains
        # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2,
        # does not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure
        # that the following is true:
        #
        #     cn1.fromServer = cn2.fromServer
        #     cn1.schedule = cn2.schedule
        #
        # If no such cn2 can be found, cn1 is not modified.
        # If no such cn1 can be found, nothing is modified by this task.
        #
        # XXX - not implemented yet
1780 def intrasite_max_node_edges(self, node_count):
1781 """Returns the maximum number of edges directed to a node in
1782 the intrasite replica graph.
1784 The KCC does not create more
1785 than 50 edges directed to a single DC. To optimize replication,
1786 we compute that each node should have n+2 total edges directed
1787 to it such that (n) is the smallest non-negative integer
1788 satisfying (node_count <= 2*(n*n) + 6*n + 7)
1790 :param node_count: total number of nodes in the replica graph
1792 n = 0
1793 while True:
1794 if node_count <= (2 * (n * n) + (6 * n) + 7):
1795 break
1796 n = n + 1
1797 n = n + 2
1798 if n < 50:
1799 return n
1800 return 50
1802 def construct_intrasite_graph(self, site_local, dc_local,
1803 nc_x, gc_only, detect_stale):
1805 # We're using the MS notation names here to allow
1806 # correlation back to the published algorithm.
1808 # nc_x - naming context (x) that we are testing if it
1809 # "should be present" on the local DC
1810 # f_of_x - replica (f) found on a DC (s) for NC (x)
1811 # dc_s - DC where f_of_x replica was found
1812 # dc_local - local DC that potentially needs a replica
1813 # (f_of_x)
1814 # r_list - replica list R
1815 # p_of_x - replica (p) is partial and found on a DC (s)
1816 # for NC (x)
1817 # l_of_x - replica (l) is the local replica for NC (x)
1818 # that should appear on the local DC
1819 # r_len = is length of replica list |R|
1821 # If the DSA doesn't need a replica for this
1822 # partition (NC x) then continue
1823 needed, ro, partial = nc_x.should_be_present(dc_local)
1825 logger.debug("construct_intrasite_graph(): enter" +
1826 "\n\tgc_only=%d" % gc_only +
1827 "\n\tdetect_stale=%d" % detect_stale +
1828 "\n\tneeded=%s" % needed +
1829 "\n\tro=%s" % ro +
1830 "\n\tpartial=%s" % partial +
1831 "\n%s" % nc_x)
1833 if not needed:
1834 return
1836 # Create a NCReplica that matches what the local replica
1837 # should say. We'll use this below in our r_list
1838 l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid,
1839 nc_x.nc_dnstr)
1841 l_of_x.identify_by_basedn(self.samdb)
1843 l_of_x.rep_partial = partial
1844 l_of_x.rep_ro = ro
1846 # Add this replica that "should be present" to the
1847 # needed replica table for this DSA
1848 dc_local.add_needed_replica(l_of_x)
1850 # Empty replica sequence list
1851 r_list = []
1853 # We'll loop thru all the DSAs looking for
1854 # writeable NC replicas that match the naming
1855 # context dn for (nc_x)
1857 for dc_s_dn, dc_s in self.my_site.dsa_table.items():
1859 # If this partition (nc_x) doesn't appear as a
1860 # replica (f_of_x) on (dc_s) then continue
1861 if not nc_x.nc_dnstr in dc_s.current_rep_table.keys():
1862 continue
1864 # Pull out the NCReplica (f) of (x) with the dn
1865 # that matches NC (x) we are examining.
1866 f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
1868 # Replica (f) of NC (x) must be writable
1869 if f_of_x.is_ro():
1870 continue
1872 # Replica (f) of NC (x) must satisfy the
1873 # "is present" criteria for DC (s) that
1874 # it was found on
1875 if not f_of_x.is_present():
1876 continue
1878 # DC (s) must be a writable DSA other than
1879 # my local DC. In other words we'd only replicate
1880 # from other writable DC
1881 if dc_s.is_ro() or dc_s is dc_local:
1882 continue
1884 # Certain replica graphs are produced only
1885 # for global catalogs, so test against
1886 # method input parameter
1887 if gc_only and not dc_s.is_gc():
1888 continue
1890 # DC (s) must be in the same site as the local DC
1891 # as this is the intra-site algorithm. This is
1892 # handled by virtue of placing DSAs in per
1893 # site objects (see enclosing for() loop)
1895 # If NC (x) is intended to be read-only full replica
1896 # for a domain NC on the target DC then the source
1897 # DC should have functional level at minimum WIN2008
1899 # Effectively we're saying that in order to replicate
1900 # to a targeted RODC (which was introduced in Windows 2008)
1901 # then we have to replicate from a DC that is also minimally
1902 # at that level.
1904 # You can also see this requirement in the MS special
1905 # considerations for RODC which state that to deploy
1906 # an RODC, at least one writable domain controller in
1907 # the domain must be running Windows Server 2008
1908 if ro and not partial and nc_x.nc_type == NCType.domain:
1909 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
1910 continue
1912 # If we haven't been told to turn off stale connection
1913 # detection and this dsa has a stale connection then
1914 # continue
1915 if detect_stale and self.is_stale_link_connection(dc_s):
1916 continue
1918 # Replica meets criteria. Add it to table indexed
1919 # by the GUID of the DC that it appears on
1920 r_list.append(f_of_x)
1922 # If a partial (not full) replica of NC (x) "should be present"
1923 # on the local DC, append to R each partial replica (p of x)
1924 # such that p "is present" on a DC satisfying the same
1925 # criteria defined above for full replica DCs.
1926 if partial:
1928 # Now we loop thru all the DSAs looking for
1929 # partial NC replicas that match the naming
1930 # context dn for (NC x)
1931 for dc_s_dn, dc_s in self.my_site.dsa_table.items():
1933 # If this partition NC (x) doesn't appear as a
1934 # replica (p) of NC (x) on the dsa DC (s) then
1935 # continue
1936 if not nc_x.nc_dnstr in dc_s.current_rep_table.keys():
1937 continue
1939 # Pull out the NCReplica with the dn that
1940 # matches NC (x) we are examining.
1941 p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
1943 # Replica (p) of NC (x) must be partial
1944 if not p_of_x.is_partial():
1945 continue
1947 # Replica (p) of NC (x) must satisfy the
1948 # "is present" criteria for DC (s) that
1949 # it was found on
1950 if not p_of_x.is_present():
1951 continue
1953 # DC (s) must be a writable DSA other than
1954 # my DSA. In other words we'd only replicate
1955 # from other writable DSA
1956 if dc_s.is_ro() or dc_s is dc_local:
1957 continue
1959 # Certain replica graphs are produced only
1960 # for global catalogs, so test against
1961 # method input parameter
1962 if gc_only and not dc_s.is_gc():
1963 continue
1965 # DC (s) must be in the same site as the local DC
1966 # as this is the intra-site algorithm. This is
1967 # handled by virtue of placing DSAs in per
1968 # site objects (see enclosing for() loop)
1970 # This criteria is moot (a no-op) for this case
1971 # because we are scanning for (partial = True). The
1972 # MS algorithm statement says partial replica scans
1973 # should adhere to the "same" criteria as full replica
1974 # scans so the criteria doesn't change here...its just
1975 # rendered pointless.
1977 # The case that is occurring would be a partial domain
1978 # replica is needed on a local DC global catalog. There
1979 # is no minimum windows behavior for those since GCs
1980 # have always been present.
1981 if ro and not partial and nc_x.nc_type == NCType.domain:
1982 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
1983 continue
1985 # If we haven't been told to turn off stale connection
1986 # detection and this dsa has a stale connection then
1987 # continue
1988 if detect_stale and self.is_stale_link_connection(dc_s):
1989 continue
1991 # Replica meets criteria. Add it to table indexed
1992 # by the GUID of the DSA that it appears on
1993 r_list.append(p_of_x)
1995 # Append to R the NC replica that "should be present"
1996 # on the local DC
1997 r_list.append(l_of_x)
1999 r_list.sort(sort_replica_by_dsa_guid)
2001 r_len = len(r_list)
2003 max_node_edges = self.intrasite_max_node_edges(r_len)
2005 # Add a node for each r_list element to the replica graph
2006 graph_list = []
2007 for rep in r_list:
2008 node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
2009 graph_list.append(node)
2011 # For each r(i) from (0 <= i < |R|-1)
2012 i = 0
2013 while i < (r_len-1):
2014 # Add an edge from r(i) to r(i+1) if r(i) is a full
2015 # replica or r(i+1) is a partial replica
2016 if not r_list[i].is_partial() or r_list[i+1].is_partial():
2017 graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr)
2019 # Add an edge from r(i+1) to r(i) if r(i+1) is a full
2020 # replica or ri is a partial replica.
2021 if not r_list[i+1].is_partial() or r_list[i].is_partial():
2022 graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr)
2023 i = i + 1
2025 # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
2026 # or r0 is a partial replica.
2027 if not r_list[r_len-1].is_partial() or r_list[0].is_partial():
2028 graph_list[0].add_edge_from(r_list[r_len-1].rep_dsa_dnstr)
2030 # Add an edge from r0 to r|R|-1 if r0 is a full replica or
2031 # r|R|-1 is a partial replica.
2032 if not r_list[0].is_partial() or r_list[r_len-1].is_partial():
2033 graph_list[r_len-1].add_edge_from(r_list[0].rep_dsa_dnstr)
2035 # For each existing nTDSConnection object implying an edge
2036 # from rj of R to ri such that j != i, an edge from rj to ri
2037 # is not already in the graph, and the total edges directed
2038 # to ri is less than n+2, the KCC adds that edge to the graph.
2039 i = 0
2040 while i < r_len:
2041 dsa = self.my_site.dsa_table[graph_list[i].dsa_dnstr]
2042 graph_list[i].add_edges_from_connections(dsa)
2043 i = i + 1
2045 i = 0
2046 while i < r_len:
2047 tnode = graph_list[i]
2049 # To optimize replication latency in sites with many NC replicas, the
2050 # KCC adds new edges directed to ri to bring the total edges to n+2,
2051 # where the NC replica rk of R from which the edge is directed
2052 # is chosen at random such that k != i and an edge from rk to ri
2053 # is not already in the graph.
2055 # Note that the KCC tech ref does not give a number for the definition
2056 # of "sites with many NC replicas". At a bare minimum to satisfy
2057 # n+2 edges directed at a node we have to have at least three replicas
2058 # in |R| (i.e. if n is zero then at least replicas from two other graph
2059 # nodes may direct edges to us).
2060 if r_len >= 3:
2061 # pick a random index
2062 findex = rindex = random.randint(0, r_len-1)
2064 # while this node doesn't have sufficient edges
2065 while not tnode.has_sufficient_edges():
2066 # If this edge can be successfully added (i.e. not
2067 # the same node and edge doesn't already exist) then
2068 # select a new random index for the next round
2069 if tnode.add_edge_from(graph_list[rindex].dsa_dnstr):
2070 findex = rindex = random.randint(0, r_len-1)
2071 else:
2072 # Otherwise continue looking against each node
2073 # after the random selection
2074 rindex = rindex + 1
2075 if rindex >= r_len:
2076 rindex = 0
2078 if rindex == findex:
2079 logger.error("Unable to satisfy max edge criteria!")
2080 break
2082 # Print the graph node in debug mode
2083 logger.debug("%s" % tnode)
2085 # For each edge directed to the local DC, ensure a nTDSConnection
2086 # points to us that satisfies the KCC criteria
2087 if graph_list[i].dsa_dnstr == dc_local.dsa_dnstr:
2088 graph_list[i].add_connections_from_edges(dc_local)
2090 i = i + 1
2092 def intrasite(self):
2093 """The head method for generating the intra-site KCC replica
2094 connection graph and attendant nTDSConnection objects
2095 in the samdb
2097 # Retrieve my DSA
2098 mydsa = self.my_dsa
2100 logger.debug("intrasite(): enter")
2102 # Test whether local site has topology disabled
2103 mysite = self.site_table[self.my_site_dnstr]
2104 if mysite.is_intrasite_topology_disabled():
2105 return
2107 detect_stale = (not mysite.is_detect_stale_disabled())
2109 # Loop thru all the partitions.
2110 for partdn, part in self.part_table.items():
2111 self.construct_intrasite_graph(mysite, mydsa, part, False,
2112 detect_stale)
2114 # If the DC is a GC server, the KCC constructs an additional NC
2115 # replica graph (and creates nTDSConnection objects) for the
2116 # config NC as above, except that only NC replicas that "are present"
2117 # on GC servers are added to R.
2118 for partdn, part in self.part_table.items():
2119 if part.is_config():
2120 self.construct_intrasite_graph(mysite, mydsa, part, True,
2121 detect_stale)
2123 # The DC repeats the NC replica graph computation and nTDSConnection
2124 # creation for each of the NC replica graphs, this time assuming
2125 # that no DC has failed. It does so by re-executing the steps as
2126 # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
2127 # set in the options attribute of the site settings object for
2128 # the local DC's site. (ie. we set "detec_stale" flag to False)
2130 # Loop thru all the partitions.
2131 for partdn, part in self.part_table.items():
2132 self.construct_intrasite_graph(mysite, mydsa, part, False,
2133 False) # don't detect stale
2135 # If the DC is a GC server, the KCC constructs an additional NC
2136 # replica graph (and creates nTDSConnection objects) for the
2137 # config NC as above, except that only NC replicas that "are present"
2138 # on GC servers are added to R.
2139 for partdn, part in self.part_table.items():
2140 if part.is_config():
2141 self.construct_intrasite_graph(mysite, mydsa, part, True,
2142 False) # don't detect stale
2144 if opts.readonly:
2145 # Display any to be added or modified repsFrom
2146 for dnstr, connect in mydsa.connect_table.items():
2147 if connect.to_be_deleted:
2148 logger.info("TO BE DELETED:\n%s" % connect)
2149 if connect.to_be_modified:
2150 logger.info("TO BE MODIFIED:\n%s" % connect)
2151 if connect.to_be_added:
2152 logger.info("TO BE ADDED:\n%s" % connect)
2154 mydsa.commit_connections(self.samdb, ro=True)
2155 else:
2156 # Commit any newly created connections to the samdb
2157 mydsa.commit_connections(self.samdb)
2159 def run(self, dburl, lp, creds):
2160 """Method to perform a complete run of the KCC and
2161 produce an updated topology for subsequent NC replica
2162 syncronization between domain controllers
2164 # We may already have a samdb setup if we are
2165 # currently importing an ldif for a test run
2166 if self.samdb is None:
2167 try:
2168 self.samdb = SamDB(url=dburl,
2169 session_info=system_session(),
2170 credentials=creds, lp=lp)
2172 except ldb.LdbError, (num, msg):
2173 logger.error("Unable to open sam database %s : %s" %
2174 (dburl, msg))
2175 return 1
2177 try:
2178 # Setup
2179 self.load_my_site()
2180 self.load_my_dsa()
2182 self.load_all_sites()
2183 self.load_all_partitions()
2184 self.load_all_transports()
2185 self.load_all_sitelinks()
2187 # These are the published steps (in order) for the
2188 # MS-TECH description of the KCC algorithm
2190 # Step 1
2191 self.refresh_failed_links_connections()
2193 # Step 2
2194 self.intrasite()
2196 # Step 3
2197 all_connected = self.intersite()
2199 # Step 4
2200 self.remove_unneeded_ntdsconn(all_connected)
2202 # Step 5
2203 self.translate_ntdsconn()
2205 # Step 6
2206 self.remove_unneeded_failed_links_connections()
2208 # Step 7
2209 self.update_rodc_connection()
2210 except:
2211 raise
2213 return 0
2215 def import_ldif(self, dburl, lp, creds, ldif_file):
2216 """Routine to import all objects and attributes that are relevent
2217 to the KCC algorithms from a previously exported LDIF file.
2219 The point of this function is to allow a programmer/debugger to
2220 import an LDIF file with non-security relevent information that
2221 was previously extracted from a DC database. The LDIF file is used
2222 to create a temporary abbreviated database. The KCC algorithm can
2223 then run against this abbreviated database for debug or test
2224 verification that the topology generated is computationally the
2225 same between different OSes and algorithms.
2227 :param dburl: path to the temporary abbreviated db to create
2228 :param ldif_file: path to the ldif file to import
2230 if os.path.exists(dburl):
2231 logger.error("Specify a database (%s) that doesn't already exist." %
2232 dburl)
2233 return 1
2235 # Use ["modules:"] as we are attempting to build a sam
2236 # database as opposed to start it here.
2237 self.samdb = Ldb(url=dburl, session_info=system_session(),
2238 lp=lp, options=["modules:"])
2240 self.samdb.transaction_start()
2241 try:
2242 data = read_and_sub_file(ldif_file, None)
2243 self.samdb.add_ldif(data, None)
2245 except Exception, estr:
2246 logger.error("%s" % estr)
2247 self.samdb.transaction_cancel()
2248 return 1
2249 else:
2250 self.samdb.transaction_commit()
2252 self.samdb = None
2254 # We have an abbreviated list of options here because we have built
2255 # an abbreviated database. We use the rootdse and extended-dn
2256 # modules only during this re-open
2257 self.samdb = SamDB(url=dburl, session_info=system_session(),
2258 credentials=creds, lp=lp,
2259 options=["modules:rootdse,extended_dn_out_ldb"])
2260 return 0
    def export_ldif(self, dburl, lp, creds, ldif_file):
        """Routine to extract all objects and attributes that are relevent
        to the KCC algorithms from a DC database.

        The point of this function is to allow a programmer/debugger to
        extract an LDIF file with non-security relevent information from
        a DC database.  The LDIF file can then be used to "import" via
        the import_ldif() function this file into a temporary abbreviated
        database.  The KCC algorithm can then run against this abbreviated
        database for debug or test verification that the topology generated
        is computationally the same between different OSes and algorithms.

        :param dburl: LDAP database URL to extract info from
        :param ldif_file: output LDIF file name to create
        :return: 0 on success, 1 on any failure
        """
        try:
            self.samdb = SamDB(url=dburl,
                               session_info=system_session(),
                               credentials=creds, lp=lp)
        except ldb.LdbError, (enum, estr):
            logger.error("Unable to open sam database (%s) : %s" %
                         (dburl, estr))
            return 1

        # Refuse to clobber an existing output file
        if os.path.exists(ldif_file):
            logger.error("Specify a file (%s) that doesn't already exist." %
                         ldif_file)
            return 1

        try:
            f = open(ldif_file, "w")
        except IOError as ioerr:
            logger.error("Unable to open (%s) : %s" % (ldif_file, str(ioerr)))
            return 1

        try:
            # Query Partitions
            attrs = [ "objectClass",
                      "objectGUID",
                      "cn",
                      "whenChanged",
                      "objectSid",
                      "Enabled",
                      "systemFlags",
                      "dnsRoot",
                      "nCName",
                      "msDS-NC-Replica-Locations",
                      "msDS-NC-RO-Replica-Locations" ]

            sstr = "CN=Partitions,%s" % self.samdb.get_config_basedn()
            res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                                    attrs=attrs,
                                    expression="(objectClass=crossRef)")

            # Write partitions output
            write_search_result(self.samdb, f, res)

            # Query cross reference container
            attrs = [ "objectClass",
                      "objectGUID",
                      "cn",
                      "whenChanged",
                      "fSMORoleOwner",
                      "systemFlags",
                      "msDS-Behavior-Version",
                      "msDS-EnabledFeature" ]

            sstr = "CN=Partitions,%s" % self.samdb.get_config_basedn()
            res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                                    attrs=attrs,
                                    expression="(objectClass=crossRefContainer)")

            # Write cross reference container output
            write_search_result(self.samdb, f, res)

            # Query Sites
            attrs = [ "objectClass",
                      "objectGUID",
                      "cn",
                      "whenChanged",
                      "systemFlags" ]

            sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
            sites = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                                      attrs=attrs,
                                      expression="(objectClass=site)")

            # Write sites output
            write_search_result(self.samdb, f, sites)

            # Query NTDS Site Settings (one per site found above)
            for msg in sites:
                sitestr = str(msg.dn)

                attrs = [ "objectClass",
                          "objectGUID",
                          "cn",
                          "whenChanged",
                          "interSiteTopologyGenerator",
                          "interSiteTopologyFailover",
                          "schedule",
                          "options" ]

                sstr = "CN=NTDS Site Settings,%s" % sitestr
                res = self.samdb.search(base=sstr, scope=ldb.SCOPE_BASE,
                                        attrs=attrs)

                # Write Site Settings output
                write_search_result(self.samdb, f, res)

            # Naming context list, accumulated from the DSA NC
            # attributes below and queried at the end
            nclist = []

            # Query Directory Service Agents (per site)
            for msg in sites:
                sstr = str(msg.dn)

                ncattrs = [ "hasMasterNCs",
                            "msDS-hasMasterNCs",
                            "hasPartialReplicaNCs",
                            "msDS-HasDomainNCs",
                            "msDS-hasFullReplicaNCs",
                            "msDS-HasInstantiatedNCs" ]
                attrs = [ "objectClass",
                          "objectGUID",
                          "cn",
                          "whenChanged",
                          "invocationID",
                          "options",
                          "msDS-isRODC",
                          "msDS-Behavior-Version" ]

                res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                                        attrs=attrs + ncattrs,
                                        expression="(objectClass=nTDSDSA)")

                # Spin thru all the DSAs looking for NC replicas
                # and build a list of all possible Naming Contexts
                # for subsequent retrieval below
                for msg in res:
                    for k in msg.keys():
                        if k in ncattrs:
                            for value in msg[k]:
                                # Some of these have binary DNs so
                                # use dsdb_Dn to split out relevent parts
                                dsdn = dsdb_Dn(self.samdb, value)
                                dnstr = str(dsdn.dn)
                                if dnstr not in nclist:
                                    nclist.append(dnstr)

                # Write DSA output
                write_search_result(self.samdb, f, res)

            # Query NTDS Connections (per site)
            for msg in sites:
                sstr = str(msg.dn)

                attrs = [ "objectClass",
                          "objectGUID",
                          "cn",
                          "whenChanged",
                          "options",
                          "whenCreated",
                          "enabledConnection",
                          "schedule",
                          "transportType",
                          "fromServer",
                          "systemFlags" ]

                res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                                        attrs=attrs,
                                        expression="(objectClass=nTDSConnection)")

                # Write NTDS Connection output
                write_search_result(self.samdb, f, res)

            # Query Intersite transports
            attrs = [ "objectClass",
                      "objectGUID",
                      "cn",
                      "whenChanged",
                      "options",
                      "name",
                      "bridgeheadServerListBL",
                      "transportAddressAttribute" ]

            sstr = "CN=Inter-Site Transports,CN=Sites,%s" % \
                   self.samdb.get_config_basedn()
            res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                                    attrs=attrs,
                                    expression="(objectClass=interSiteTransport)")

            # Write inter-site transport output
            write_search_result(self.samdb, f, res)

            # Query siteLink
            attrs = [ "objectClass",
                      "objectGUID",
                      "cn",
                      "whenChanged",
                      "systemFlags",
                      "options",
                      "schedule",
                      "replInterval",
                      "siteList",
                      "cost" ]

            sstr = "CN=Sites,%s" % \
                   self.samdb.get_config_basedn()
            res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                                    attrs=attrs,
                                    expression="(objectClass=siteLink)")

            # Write siteLink output
            write_search_result(self.samdb, f, res)

            # Query siteLinkBridge
            attrs = [ "objectClass",
                      "objectGUID",
                      "cn",
                      "whenChanged",
                      "siteLinkList" ]

            sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
            res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                                    attrs=attrs,
                                    expression="(objectClass=siteLinkBridge)")

            # Write siteLinkBridge output
            write_search_result(self.samdb, f, res)

            # Query servers containers
            # Needed for samdb.server_site_name()
            attrs = [ "objectClass",
                      "objectGUID",
                      "cn",
                      "whenChanged",
                      "systemFlags" ]

            sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
            res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                                    attrs=attrs,
                                    expression="(objectClass=serversContainer)")

            # Write servers container output
            write_search_result(self.samdb, f, res)

            # Query servers
            # Needed because some transport interfaces refer back to
            # attributes found in the server object.  Also needed
            # so extended-dn will be happy with dsServiceName in rootDSE
            attrs = [ "objectClass",
                      "objectGUID",
                      "cn",
                      "whenChanged",
                      "systemFlags",
                      "dNSHostName",
                      "mailAddress" ]

            sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
            res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                                    attrs=attrs,
                                    expression="(objectClass=server)")

            # Write server output
            write_search_result(self.samdb, f, res)

            # Query Naming Context replicas (one base search per NC
            # collected from the DSAs above)
            attrs = [ "objectClass",
                      "objectGUID",
                      "cn",
                      "whenChanged",
                      "objectSid",
                      "fSMORoleOwner",
                      "msDS-Behavior-Version",
                      "repsFrom",
                      "repsTo" ]

            for sstr in nclist:
                res = self.samdb.search(sstr, scope=ldb.SCOPE_BASE,
                                        attrs=attrs)

                # Write naming context output
                write_search_result(self.samdb, f, res)

            # Query rootDSE replicas
            attrs=[ "objectClass",
                    "objectGUID",
                    "cn",
                    "whenChanged",
                    "rootDomainNamingContext",
                    "configurationNamingContext",
                    "schemaNamingContext",
                    "defaultNamingContext",
                    "dsServiceName" ]

            sstr = ""
            res = self.samdb.search(sstr, scope=ldb.SCOPE_BASE,
                                    attrs=attrs)

            # Record the rootDSE object as a dn as it
            # would appear in the base ldb file.  We have
            # to save it this way because we are going to
            # be importing as an abbreviated database.
            res[0].dn = ldb.Dn(self.samdb, "@ROOTDSE")

            # Write rootdse output
            write_search_result(self.samdb, f, res)

        except ldb.LdbError, (enum, estr):
            # sstr holds whichever search base failed last
            logger.error("Error processing (%s) : %s" % (sstr, estr))
            return 1

        f.close()
        return 0
2578 ##################################################
2579 # Global Functions
2580 ##################################################
def sort_replica_by_dsa_guid(rep1, rep2):
    """Comparison function ordering NC replicas by the GUID of the
    DSA they appear on (for use as a sort comparator).

    Returns -1, 0 or +1 like the builtin cmp(); written with the
    portable (a > b) - (a < b) form instead of cmp() itself, which is
    Python-2-only.
    """
    return ((rep1.rep_dsa_guid > rep2.rep_dsa_guid) -
            (rep1.rep_dsa_guid < rep2.rep_dsa_guid))
def sort_dsa_by_gc_and_guid(dsa1, dsa2):
    """Comparison function ordering DSAs with GC servers first, then
    by DSA GUID (for use as a sort comparator).

    Returns -1, 0 or +1 like the builtin cmp(); the final tie-break is
    written with the portable (a > b) - (a < b) form instead of cmp()
    itself, which is Python-2-only.
    """
    # GC servers sort ahead of non-GC servers
    if dsa1.is_gc() and not dsa2.is_gc():
        return -1
    if not dsa1.is_gc() and dsa2.is_gc():
        return +1
    return (dsa1.dsa_guid > dsa2.dsa_guid) - (dsa1.dsa_guid < dsa2.dsa_guid)
def is_smtp_replication_available():
    """Report whether the SMTP transport may be used for NC replication.

    Always False: Samba does not implement SMTP transfer of NC changes
    between DCs.
    """
    return False
def write_search_result(samdb, f, res):
    """Write every message of ldb search result *res* to file *f* as LDIF."""
    for msg in res:
        f.write("%s" % samdb.write_ldif(msg, ldb.CHANGETYPE_NONE))
def create_edge(con_type, site_link, dn_to_vertex):
    """Build an undirected MultiEdge for *site_link*.

    The edge joins the vertices of every site on the link's site list
    (looked up via *dn_to_vertex*) and carries the link's replication
    parameters (cost, options, interval, schedule) plus the connection
    type.
    """
    edge = MultiEdge()
    edge.site_link = site_link
    edge.vertices = []
    for site in site_link.site_list:
        if site in dn_to_vertex:
            edge.vertices.extend(dn_to_vertex[site])
    edge.repl_info.cost = site_link.cost
    edge.repl_info.options = site_link.options
    edge.repl_info.interval = site_link.interval
    edge.repl_info.schedule = site_link.schedule
    edge.con_type = con_type
    edge.directed = False
    return edge
def create_auto_edge_set(graph, transport):
    """Collect every edge of *graph* matching *transport* into a
    MultiEdgeSet that is not backed by a siteLinkBridge object."""
    # TODO Null guid - not associated with a SiteLinkBridge object
    e_set = MultiEdgeSet()
    e_set.guid = None
    for link in graph.edges:
        if link.con_type == transport:
            e_set.edges.append(link)
    return e_set
def create_edge_set(graph, transport, site_link_bridge):
    """Build a MultiEdgeSet for a siteLinkBridge object.

    Currently returns an empty set: site link bridges are not yet
    stored, so the set cannot be populated (see TODO below).
    """
    # TODO not implemented - need to store all site link bridges
    e_set = MultiEdgeSet()
    # e_set.guid = site_link_bridge
    return e_set
def setup_vertices(graph):
    """Reset every vertex of *graph* ahead of a Dijkstra run.

    White (uncovered) vertices start with "infinite" cost and no root
    or component; colored vertices become their own zero-cost root and
    component.  Replication info and the demoted flag are reset on all
    vertices.
    """
    infinity = 2 ** 32 - 1
    for vertex in graph.vertices:
        if vertex.is_white():
            vertex.repl_info.cost = infinity
            vertex.root = None
            vertex.component_id = None
        else:
            vertex.repl_info.cost = 0
            vertex.root = vertex
            vertex.component_id = vertex

        vertex.repl_info.interval = 0
        vertex.repl_info.options = 0xFFFFFFFF
        vertex.repl_info.schedule = None  # TODO highly suspicious
        vertex.demoted = False
def dijkstra(graph, edge_type, include_black):
    """Run Dijkstra's algorithm over *graph*.

    Seeds a priority queue with the eligible colored vertices and then
    relaxes, for each vertex popped, every other endpoint of each of
    its edges.
    """
    queue = []
    setup_dijkstra(graph, edge_type, include_black, queue)
    while queue:
        cost, guid, vertex = heapq.heappop(queue)
        for edge in vertex.edges:
            for peer in edge.vertices:
                if peer is not vertex:
                    # Attempt to relax the path from vertex to peer
                    try_new_path(graph, queue, vertex, edge, peer)
def setup_dijkstra(graph, edge_type, include_black, queue):
    """Initialize *graph* and seed *queue* for a Dijkstra run.

    Every non-white vertex that may participate (black vertices only
    when *include_black* is set, and only vertices accepting this edge
    type) is pushed onto the priority queue; ineligible vertices are
    priced out and demoted.
    """
    setup_vertices(graph)
    for vertex in graph.vertices:
        if vertex.is_white():
            continue

        if ((vertex.is_black() and not include_black)
            or edge_type not in vertex.accept_black
            or edge_type not in vertex.accept_red_red):
            vertex.repl_info.cost = 2 ** 32 - 1
            vertex.root = None  # NULL GUID
            vertex.demoted = True  # Demoted appears not to be used
        else:
            # TODO guid must be string?
            # BUG FIX: was "vertex.replInfo.cost", which raises
            # AttributeError - the attribute is named repl_info
            # everywhere else in this file (see setup_vertices)
            heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
def try_new_path(graph, queue, vfrom, edge, vto):
    """Relax the path reaching *vto* from *vfrom* across *edge*.

    Combines the replication info of the source vertex and the edge,
    checking there is a valid time frame for which replication can
    actually occur despite being adequately connected.  The new path is
    adopted when it is cheaper, or equally cheap with a longer total
    schedule, and *vto* is then re-queued.
    """
    proposed = ReplInfo()
    intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, proposed)

    # More expensive than the current path: ignore the edge
    if proposed.cost > vto.repl_info.cost:
        return

    # Cheaper but with no usable schedule intersection: ignore too
    if proposed.cost < vto.repl_info.cost and not intersect:
        return

    new_duration = total_schedule(proposed.schedule)
    old_duration = total_schedule(vto.repl_info.schedule)

    # Adopt when cheaper, or when the schedule window is longer
    if proposed.cost < vto.repl_info.cost or new_duration > old_duration:
        vto.root = vfrom.root
        vto.component_id = vfrom.component_id
        vto.repl_info = proposed
        heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
def check_demote_vertex(vertex, edge_type):
    """Demote *vertex* if it cannot accept *edge_type* at all.

    A colored vertex accepting the edge type via neither red-red nor
    black edges is priced out (infinite cost, no root) and flagged as
    demoted.  White vertices are left untouched.
    """
    if vertex.is_white():
        return

    accepts = (edge_type in vertex.accept_black
               or edge_type in vertex.accept_red_red)
    if not accepts:
        vertex.repl_info.cost = 2 ** 32 - 1
        vertex.root = None
        vertex.demoted = True  # Demoted appears not to be used
def undemote_vertex(vertex):
    """Reverse a demotion: restore *vertex* as its own zero-cost root.

    White vertices are left untouched.
    """
    if not vertex.is_white():
        vertex.repl_info.cost = 0
        vertex.root = vertex
        vertex.demoted = False
def process_edge_set(graph, e_set, internal_edges):
    """Process one edge set, or every graph edge when *e_set* is None.

    In the None case, each edge's endpoints are temporarily demoted if
    they cannot accept the edge's transport, the edge is processed, and
    the endpoints are then restored.
    """
    if e_set is not None:
        for edge in e_set.edges:
            process_edge(graph, edge, internal_edges)
        return

    for edge in graph.edges:
        # Demote endpoints that cannot take this transport...
        for vertex in edge.vertices:
            check_demote_vertex(vertex, edge.con_type)
        process_edge(graph, edge, internal_edges)
        # ...then put them back the way they were
        for vertex in edge.vertices:
            undemote_vertex(vertex)
def process_edge(graph, examine, internal_edges):
    """Examine one multi-edge and record candidate inter-tree edges.

    Picks the "best" endpoint of *examine* (smallest by color, then
    replication cost, then guid) and proposes an internal edge to it
    from every other rooted endpoint that lies in a different
    component.
    """
    # Rank endpoints as (color, repl cost, guid, vertex) tuples so a
    # plain tuple sort yields the best vertex first
    ranked = [(v.color, v.repl_info.cost, v.guid, v)
              for v in examine.vertices]
    ranked.sort()
    bestv = ranked[0][3]

    # Add to internal edges an edge from every colored vertex to bestv
    for v in examine.vertices:
        if v.component_id is None or v.root is None:
            continue

        # Only a valid inter-tree edge: both ends rooted and in
        # different components
        if (bestv.component_id is not None and bestv.root is not None
            and v.component_id is not None and v.root is not None and
            bestv.component_id != v.component_id):
            add_int_edge(graph, internal_edges, examine, bestv, v)
# Add internal edge, endpoints are roots of the vertices to pass in
# and are always colored
def add_int_edge(graph, internal_edges, examine, v1, v2):
    """Propose an InternalEdge between the roots of *v1* and *v2*.

    The edge is only added when both roots accept the transport of the
    examined edge (via red-red acceptance when both roots are red,
    black acceptance otherwise) and the transitive replication info of
    the two trees plus the edge combines successfully.
    """
    root1 = v1.root
    root2 = v2.root

    # Which acceptance list applies depends on whether both roots are red
    red_red = root1.is_red() and root2.is_red()

    if red_red:
        if (examine.con_type not in root1.accept_red_red
            or examine.con_type not in root2.accept_red_red):
            return
    else:
        if (examine.con_type not in root1.accept_black
            or examine.con_type not in root2.accept_black):
            return

    # Create the transitive replInfo for the two trees and this edge
    ri = ReplInfo()
    if not combine_repl_info(v1.repl_info, v2.repl_info, ri):
        return

    # ri is now initialized
    ri2 = ReplInfo()
    if not combine_repl_info(ri, examine.repl_info, ri2):
        return

    newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type)
    # Order by vertex guid
    if newIntEdge.v1.guid > newIntEdge.v2.guid:  # TODO compare guid (str)
        newIntEdge.v1 = root2
        newIntEdge.v2 = root1

    internal_edges.add(newIntEdge)
def kruskal(graph, edges):
    """Run Kruskal's minimum spanning tree algorithm over *edges*.

    :return: tuple of (accepted output edges, number of remaining
        connected components)
    """
    for v in graph.vertices:
        v.edges = []

    components = set(graph.vertices)

    # Sorted based on internal comparison function of internal edge
    candidates = sorted(edges)

    expected_num_tree_edges = 0  # TODO this value makes little sense

    count_edges = 0
    output_edges = []
    for e in candidates:  # TODO and num_components > 1
        parent1 = find_component(e.v1)
        parent2 = find_component(e.v2)
        # Endpoints in different components: accept the edge and merge
        if parent1 is not parent2:
            count_edges += 1
            add_out_edge(graph, output_edges, e)
            parent1.component = parent2
            components.discard(parent1)

    return output_edges, len(components)
def find_component(vertex):
    """Return the representative (root) of *vertex*'s component.

    Union-find lookup: walks the component chain to its fixed point,
    then performs path compression so every vertex on the walk points
    straight at the root.
    """
    if vertex.component is vertex:
        return vertex

    # First pass: locate the root of the chain
    current = vertex
    while current.component is not current:
        current = current.component
    root = current

    # Second pass: path compression - repoint the chain at the root
    current = vertex
    while current.component is not root:
        nxt = current.component
        current.component = root
        current = nxt

    return root
def add_out_edge(graph, output_edges, e):
    """Materialize internal edge *e* as a real MultiEdge in the output.

    The new edge is undirected, carries no GUID, inherits the internal
    edge's transport and replication info, and is linked into the edge
    lists of both endpoint vertices.
    """
    v1, v2 = e.v1, e.v2

    # This multi-edge is a 'real' edge with no GUID
    ee = MultiEdge()
    ee.directed = False
    ee.vertices.append(v1)
    ee.vertices.append(v2)
    ee.con_type = e.e_type
    ee.repl_info = e.repl_info
    output_edges.append(ee)

    v1.edges.append(ee)
    v2.edges.append(ee)
##################################################
# samba_kcc entry point
##################################################

parser = optparse.OptionParser("samba_kcc [options]")
sambaopts = options.SambaOptions(parser)
credopts = options.CredentialsOptions(parser)

parser.add_option_group(sambaopts)
parser.add_option_group(credopts)
parser.add_option_group(options.VersionOptions(parser))

parser.add_option("--readonly",
                  help="compute topology but do not update database",
                  action="store_true")

parser.add_option("--debug",
                  help="debug output",
                  action="store_true")

parser.add_option("--seed",
                  help="random number seed",
                  type=str, metavar="<number>")

parser.add_option("--importldif",
                  help="import topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("--exportldif",
                  help="export topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("-H", "--URL",
                  help="LDB URL for database or target server",
                  type=str, metavar="<URL>", dest="dburl")

parser.add_option("--tmpdb",
                  help="schemaless database file to create for ldif import",
                  type=str, metavar="<file>")

logger = logging.getLogger("samba_kcc")
logger.addHandler(logging.StreamHandler(sys.stdout))

# BUG FIX: parse the command line *before* consulting it.  Previously
# get_loadparm()/get_credentials() were called ahead of parse_args(),
# so -s/--configfile/-U style options were silently ignored.
opts, args = parser.parse_args()

lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)

if opts.readonly is None:
    opts.readonly = False

if opts.debug:
    logger.setLevel(logging.DEBUG)
elif opts.readonly:
    logger.setLevel(logging.INFO)
else:
    logger.setLevel(logging.WARNING)

# initialize seed from optional input parameter
if opts.seed:
    random.seed(int(opts.seed))
else:
    random.seed(0xACE5CA11)

if opts.dburl is None:
    opts.dburl = lp.samdb_url()

# Instantiate Knowledge Consistency Checker and perform run
kcc = KCC()

if opts.exportldif:
    rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)
    sys.exit(rc)

if opts.importldif:
    # An ldif import needs a throwaway local database to build into
    if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
        logger.error("Specify a target temp database file with --tmpdb option.")
        sys.exit(1)

    rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)
    if rc != 0:
        sys.exit(rc)

rc = kcc.run(opts.dburl, lp, creds)
sys.exit(rc)