3 # Compute our KCC topology
5 # Copyright (C) Dave Craft 2011
7 # This program is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 3 of the License, or
10 # (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
17 # You should have received a copy of the GNU General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
# ensure we get messages out immediately, so they get in the samba logs,
# and don't get swallowed by a timeout
os.environ['PYTHONUNBUFFERED'] = '1'

# forcing GMT avoids a problem in some timezones with kerberos. Both MIT
# and Heimdal can get mutual authentication errors due to the 24 second
# difference between UTC and GMT when using some zone files (eg. the PDT
# zone from US/Pacific)
os.environ["TZ"] = "GMT"

# Find right directory when running from source tree
sys.path.insert(0, "bin/python")
48 from samba
.auth
import system_session
49 from samba
.samdb
import SamDB
50 from samba
.dcerpc
import drsuapi
51 from samba
.kcc_utils
import *
56 """The Knowledge Consistency Checker class.
58 A container for objects and methods allowing a run of the KCC. Produces a
59 set of connections in the samdb for which the Distributed Replication
60 Service can then utilize to replicate naming contexts
def __init__(self):
    """Initializes the partitions class which can hold
    our local DCs partitions or all the partitions in
    the forest.
    """
    self.part_table = {}  # partition objects
    # Site objects keyed by GUID string.
    # NOTE(review): not visible in the fragmented source, but
    # load_my_site()/load_all_sites() populate self.site_table, so it
    # must be initialized here.
    self.site_table = {}
    self.transport_table = {}
    self.sitelink_table = {}

    # TODO: These should be backed by a 'permanent' store so that when
    # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
    # the failure information can be returned
    self.kcc_failed_links = {}
    self.kcc_failed_connections = set()

    # Used in inter-site topology computation. A list
    # of connections (by NTDSConnection object) that are
    # to be kept when pruning un-needed NTDS Connections
    self.keep_connection_list = []

    self.my_dsa_dnstr = None  # My dsa DN
    self.my_dsa = None        # My dsa object

    self.my_site_dnstr = None
    self.my_site_guid = None
    # NOTE(review): self.my_site is read throughout the class
    # (e.g. same_site() calls); initialize it here as well.
    self.my_site = None
def load_all_transports(self):
    """Loads the inter-site transport objects for Sites.

    Each interSiteTransport object found under the config partition's
    "CN=Inter-Site Transports,CN=Sites" container is loaded into
    self.transport_table, keyed by its object GUID string.

    ::returns: Raises an Exception on error
    """
    try:
        res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
                                self.samdb.get_config_basedn(),
                                scope=ldb.SCOPE_SUBTREE,
                                expression="(objectClass=interSiteTransport)")
    except ldb.LdbError as e:
        (enum, estr) = e.args
        raise Exception("Unable to find inter-site transports - (%s)" %
                        estr)

    for msg in res:
        dnstr = str(msg.dn)

        transport = Transport(dnstr)

        transport.load_transport(self.samdb)

        # Already in the table?  Skip it rather than replace the
        # existing entry.
        if str(transport.guid) in self.transport_table.keys():
            continue

        # Assign this transport to table
        self.transport_table[str(transport.guid)] = transport
def load_all_sitelinks(self):
    """Loads the inter-site siteLink objects.

    Each siteLink object found under the config partition's
    "CN=Inter-Site Transports,CN=Sites" container is loaded into
    self.sitelink_table, keyed by its DN string.

    ::returns: Raises an Exception on error
    """
    try:
        res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
                                self.samdb.get_config_basedn(),
                                scope=ldb.SCOPE_SUBTREE,
                                expression="(objectClass=siteLink)")
    except ldb.LdbError as e:
        (enum, estr) = e.args
        raise Exception("Unable to find inter-site siteLinks - (%s)" % estr)

    for msg in res:
        dnstr = str(msg.dn)

        # Already in the table?  Skip it rather than replace the
        # existing entry.
        if dnstr in self.sitelink_table.keys():
            continue

        sitelink = SiteLink(dnstr)

        sitelink.load_sitelink(self.samdb)

        # Assign this siteLink to table
        self.sitelink_table[dnstr] = sitelink
def load_my_site(self):
    """Loads the Site class for the local DSA.

    Computes the local site DN from the server's site name, loads the
    Site object, records it in self.site_table (keyed by site GUID
    string) and remembers it as self.my_site / self.my_site_guid.

    ::returns: Raises an Exception on error
    """
    self.my_site_dnstr = "CN=%s,CN=Sites,%s" % (
        self.samdb.server_site_name(),
        self.samdb.get_config_basedn())
    site = Site(self.my_site_dnstr)
    site.load_site(self.samdb)

    self.site_table[str(site.site_guid)] = site
    self.my_site_guid = site.site_guid
    # NOTE(review): the line recording the loaded site object itself is
    # missing from the fragmented source; self.my_site is read by many
    # other methods, so it must be set here.
    self.my_site = site
def load_all_sites(self):
    """Discover all sites and instantiate and load each.

    Every site object found under the config partition's "CN=Sites"
    container is loaded into self.site_table, keyed by its site GUID
    string.

    ::returns: Raises an Exception on error
    """
    try:
        res = self.samdb.search("CN=Sites,%s" %
                                self.samdb.get_config_basedn(),
                                scope=ldb.SCOPE_SUBTREE,
                                expression="(objectClass=site)")
    except ldb.LdbError as e:
        (enum, estr) = e.args
        raise Exception("Unable to find sites - (%s)" % estr)

    for msg in res:
        sitestr = str(msg.dn)

        site = Site(sitestr)
        site.load_site(self.samdb)

        # Already in the table (e.g. the local site loaded by
        # load_my_site)?  Keep the existing entry.
        if str(site.site_guid) in self.site_table.keys():
            continue

        self.site_table[str(site.site_guid)] = site
def load_my_dsa(self):
    """Discover my nTDSDSA dn thru the rootDSE entry.

    Reads the rootDSE "dsServiceName" attribute to find the local
    nTDSDSA DN, then resolves the DSA object from the local site.

    ::returns: Raises an Exception on error.
    """
    dn = ldb.Dn(self.samdb, "")  # empty DN == rootDSE
    try:
        res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
                                attrs=["dsServiceName"])
    except ldb.LdbError as e:
        (enum, estr) = e.args
        raise Exception("Unable to find my nTDSDSA - (%s)" % estr)

    self.my_dsa_dnstr = res[0]["dsServiceName"][0]
    self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)
def load_all_partitions(self):
    """Discover all NCs thru the Partitions dn and
    instantiate and load the NCs.

    Each NC is inserted into the part_table by partition
    dn string (not the nCName dn string).

    ::returns: Raises an Exception on error
    """
    try:
        res = self.samdb.search("CN=Partitions,%s" %
                                self.samdb.get_config_basedn(),
                                scope=ldb.SCOPE_SUBTREE,
                                expression="(objectClass=crossRef)")
    except ldb.LdbError as e:
        (enum, estr) = e.args
        raise Exception("Unable to find partitions - (%s)" % estr)

    for msg in res:
        partstr = str(msg.dn)

        # Already in the table?  Keep the existing entry.
        if partstr in self.part_table.keys():
            continue

        part = Partition(partstr)

        part.load_partition(self.samdb)
        self.part_table[partstr] = part
def should_be_present_test(self):
    """Enumerate all loaded partitions and DSAs in local
    site and test if NC should be present as replica.

    Purely diagnostic: logs (needed, ro, partial) for each
    partition/DSA pair; modifies nothing.
    """
    for partdn, part in self.part_table.items():
        for dsadn, dsa in self.my_site.dsa_table.items():
            needed, ro, partial = part.should_be_present(dsa)
            logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
                        (dsadn, part.nc_dnstr, needed, ro, partial))
def refresh_failed_links_connections(self):
    """Instead of NULL link with failure_count = 0, the tuple is simply removed"""

    # LINKS: Refresh failed links
    self.kcc_failed_links = {}
    current, needed = self.my_dsa.get_rep_tables()
    for replica in current.values():
        # For every possible connection to replicate
        for reps_from in replica.rep_repsFrom:
            failure_count = reps_from.consecutive_sync_failures
            if failure_count <= 0:
                continue

            dsa_guid = str(reps_from.source_dsa_obj_guid)
            time_first_failure = reps_from.last_success
            last_result = reps_from.last_attempt
            dns_name = reps_from.dns_name1

            f = self.kcc_failed_links.get(dsa_guid)
            if f is None:
                f = KCCFailedObject(dsa_guid, failure_count,
                                    time_first_failure, last_result,
                                    dns_name)
                self.kcc_failed_links[dsa_guid] = f
            #elif f.failure_count == 0:
            #    f.failure_count = failure_count
            #    f.time_first_failure = time_first_failure
            #    f.last_result = last_result
            else:
                # Merge: keep the worst failure count, the earliest
                # first-failure time, and the latest result.
                f.failure_count = max(f.failure_count, failure_count)
                f.time_first_failure = min(f.time_first_failure,
                                           time_first_failure)
                f.last_result = last_result

    # CONNECTIONS: Refresh failed connections
    restore_connections = set()
    for connection in self.kcc_failed_connections:
        try:
            drs_utils.drsuapi_connect(connection.dns_name, lp, creds)
            # Failed connection is no longer failing
            restore_connections.add(connection)
        except drs_utils.drsException:
            # Failed connection still failing
            connection.failure_count += 1

    # Remove the restored connections from the failed connections
    self.kcc_failed_connections.difference_update(restore_connections)
def is_stale_link_connection(self, target_dsa):
    """Check if a replication link to target_dsa is stale.

    Returns False if no tuple z exists in the kCCFailedLinks or
    kCCFailedConnections variables such that z.UUIDDsa is the
    objectGUID of the target dsa, z.FailureCount > 0, and
    the current time - z.TimeFirstFailure > 2 hours.

    :param target_dsa: DSA object whose link is to be tested
    ::returns: True if such a stale tuple exists, False otherwise
    """
    # Returns True if tuple z exists...
    failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
    if failed_link:
        # failure_count should be > 0, but check anyways
        if failed_link.failure_count > 0:
            unix_first_time_failure = \
                nttime2unix(failed_link.time_first_failure)
            # TODO guard against future
            current_time = int(time.time())
            if unix_first_time_failure > current_time:
                logger.error("The last success time attribute for "
                             "repsFrom is in the future!")

            # Perform calculation in seconds
            if (current_time - unix_first_time_failure) > 60 * 60 * 2:
                return True

    # NOTE(review): the original also consulted kcc_failed_connections
    # here; those lines are missing from the fragmented source.
    return False
# TODO: This should be backed by some form of local database
def remove_unneeded_failed_links_connections(self):
    """Prune stale failure-tracking state (currently a no-op).

    Remove all tuples in kcc_failed_links where failure count = 0.
    In this implementation, this should never happen.

    Remove all connections which were not used this run or connections
    that became active during this run.
    """
    pass
def remove_unneeded_ntdsconn(self, all_connected):
    """Removes unneeded NTDS Connections after computation
    of KCC intra and inter-site topology has finished.

    :param all_connected: result of create_intersite_connections();
        True if every site could be connected.
    """
    mydsa = self.my_dsa

    # Loop thru connections
    for cn_dnstr, cn_conn in mydsa.connect_table.items():

        s_dnstr = cn_conn.get_from_dnstr()
        if s_dnstr is None:
            # No fromServer value - connection is unusable.
            cn_conn.to_be_deleted = True
            continue

        # Get the source DSA no matter what site
        s_dsa = self.get_dsa(s_dnstr)

        # Check if the DSA is in our site
        same_site = self.my_site.same_site(s_dsa)

        if same_site:
            # Given an nTDSConnection object cn, if the DC with the
            # nTDSDSA object dc that is the parent object of cn and
            # the DC with the nTDSDA object referenced by cn!fromServer
            # are in the same site, the KCC on dc deletes cn if all of
            # the following are true:
            #
            #   Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
            #
            #   No site settings object s exists for the local DC's site, or
            #   bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
            #   s!options.
            #
            #   Another nTDSConnection object cn2 exists such that cn and
            #   cn2 have the same parent object, cn!fromServer = cn2!fromServer,
            #   and either
            #
            #       cn!whenCreated < cn2!whenCreated
            #
            #       cn!whenCreated = cn2!whenCreated and
            #       cn!objectGUID < cn2!objectGUID
            #
            #   Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
            if not cn_conn.is_generated():
                continue

            if self.my_site.is_cleanup_ntdsconn_disabled():
                continue

            # Loop thru connections looking for a duplicate that
            # fulfills the previous criteria
            for cn2_dnstr, cn2_conn in mydsa.connect_table.items():
                if cn2_conn is cn_conn:
                    continue

                s2_dnstr = cn2_conn.get_from_dnstr()
                if s2_dnstr is None:
                    continue

                # If the NTDS Connections has a different
                # fromServer field then no match
                if s2_dnstr != s_dnstr:
                    continue

                # Python-2's cmp(a, b) < 0 is equivalent to a < b.
                lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or
                          (cn_conn.whenCreated == cn2_conn.whenCreated and
                           cn_conn.guid < cn2_conn.guid))

                if lesser and not cn_conn.is_rodc_topology():
                    cn_conn.to_be_deleted = True

        # Given an nTDSConnection object cn, if the DC with the nTDSDSA
        # object dc that is the parent object of cn and the DC with
        # the nTDSDSA object referenced by cn!fromServer are in
        # different sites, a KCC acting as an ISTG in dc's site
        # deletes cn if all of the following are true:
        #
        #   Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
        #
        #   cn!fromServer references an nTDSDSA object for a DC
        #   in a site other than the local DC's site.
        #
        #   The keepConnections sequence returned by
        #   CreateIntersiteConnections() does not contain
        #   cn!objectGUID, or cn is "superseded by" (see below)
        #   another nTDSConnection cn2 and keepConnections
        #   contains cn2!objectGUID.
        #
        #   The return value of CreateIntersiteConnections()
        #   was true.
        #
        #   Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
        #   cn!options
        else:  # different site
            if not mydsa.is_istg():
                continue

            if not cn_conn.is_generated():
                continue

            # We are directly using this connection in intersite or
            # we are using a connection which can supersede this one.
            #
            # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
            # appear to be correct.
            #
            # 1. cn!fromServer and cn!parent appear inconsistent with no cn2
            # 2. The repsFrom do not imply each other
            if self.keep_connection(cn_conn):  # and not_superceded:
                continue

            # This is the result of create_intersite_connections
            if not all_connected:
                continue

            if not cn_conn.is_rodc_topology():
                cn_conn.to_be_deleted = True

    if mydsa.is_ro() or opts.readonly:
        for dnstr, connect in mydsa.connect_table.items():
            if connect.to_be_deleted:
                logger.info("TO BE DELETED:\n%s" % connect)
            if connect.to_be_added:
                logger.info("TO BE ADDED:\n%s" % connect)

        # Peform deletion from our tables but perform
        # no database modification
        mydsa.commit_connections(self.samdb, ro=True)
    else:
        # Commit any modified connections
        mydsa.commit_connections(self.samdb)
def get_dsa_by_guidstr(self, guidstr):
    """Given a DSA guid string, consult all sites looking
    for the corresponding DSA and return it.

    :param guidstr: objectGUID of the DSA, as a string
    ::returns: the DSA object, or None if no site contains it
    """
    for site in self.site_table.values():
        dsa = site.get_dsa_by_guidstr(guidstr)
        if dsa is not None:
            return dsa
    return None
def get_dsa(self, dnstr):
    """Given a DSA dn string, consult all sites looking
    for the corresponding DSA and return it.

    :param dnstr: DN string of the DSA
    ::returns: the DSA object, or None if no site contains it
    """
    for site in self.site_table.values():
        dsa = site.get_dsa(dnstr)
        if dsa is not None:
            return dsa
    return None
def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
    """Update t_repsFrom if necessary to satisfy requirements. Such
    updates are typically required when the IDL_DRSGetNCChanges
    server has moved from one site to another--for example, to
    enable compression when the server is moved from the
    client's site to another site.

    :param n_rep: NC replica we need
    :param t_repsFrom: repsFrom tuple to modify
    :param s_rep: NC replica at source DSA
    :param s_dsa: source DSA
    :param cn_conn: Local DSA NTDSConnection child

    ::returns: (update) bit field containing which portion of the
       repsFrom was modified.  This bit field is suitable as input
       to IDL_DRSReplicaModify ulModifyFields element, as it consists
       of these bits:
           drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
           drsuapi.DRSUAPI_DRS_UPDATE_FLAGS
           drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS
    """
    s_dnstr = s_dsa.dsa_dnstr

    # Is the source DSA in the local DC's site?
    same_site = self.my_site.same_site(s_dsa)

    times = cn_conn.convert_schedule_to_repltimes()

    # if schedule doesn't match then update and modify
    if times != t_repsFrom.schedule:
        t_repsFrom.schedule = times

    # Bit DRS_PER_SYNC is set in replicaFlags if and only
    # if nTDSConnection schedule has a value v that specifies
    # scheduled replication is to be performed at least once
    # per week.
    if cn_conn.is_schedule_minimum_once_per_week():

        if (t_repsFrom.replica_flags &
            drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0:
            t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC

    # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
    # if the source DSA and the local DC's nTDSDSA object are
    # in the same site or source dsa is the FSMO role owner
    # of one or more FSMO roles in the NC replica.
    if same_site or n_rep.is_fsmo_role_owner(s_dnstr):

        if (t_repsFrom.replica_flags &
            drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0:
            t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC

    # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
    # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
    # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
    # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
    # t.replicaFlags if and only if s and the local DC's
    # nTDSDSA object are in different sites.
    if (cn_conn.options &
        dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0:

        if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

    elif not same_site:

        if (t_repsFrom.replica_flags &
            drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0:
            t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

    # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
    # and only if s and the local DC's nTDSDSA object are
    # not in the same site and the
    # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
    # clear in cn!options
    if (not same_site and
        (cn_conn.options &
         dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):

        if (t_repsFrom.replica_flags &
            drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0:
            t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION

    # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
    # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
    if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:

        if (t_repsFrom.replica_flags &
            drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0:
            t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC

    # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
    # set in t.replicaFlags if and only if cn!enabledConnection = false.
    if not cn_conn.is_enabled():

        if (t_repsFrom.replica_flags &
            drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0:
            t_repsFrom.replica_flags |= \
                drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC

        if (t_repsFrom.replica_flags &
            drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0:
            t_repsFrom.replica_flags |= \
                drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC

    # If s and the local DC's nTDSDSA object are in the same site,
    # cn!transportType has no value, or the RDN of cn!transportType
    # is CN=IP:
    #
    #     Bit DRS_MAIL_REP in t.replicaFlags is clear.
    #
    #     t.uuidTransport = NULL GUID.
    #
    #     t.uuidDsa = The GUID-based DNS name of s.
    #
    # Otherwise:
    #
    #     Bit DRS_MAIL_REP in t.replicaFlags is set.
    #
    #     If x is the object with dsname cn!transportType,
    #     t.uuidTransport = x!objectGUID.
    #
    #     Let a be the attribute identified by
    #     x!transportAddressAttribute. If a is
    #     the dNSHostName attribute, t.uuidDsa = the GUID-based
    #     DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
    #
    # NOTE MS-TECH INCORRECT:
    #
    # All indications point to the uuidDsa statements above being
    # incorrectly stated: uuidDsa is a GUID, not a GUID-based DNS
    # name, nor can it hold (s!parent)!a.  What should have been
    # said is:
    #
    #     t.naDsa = The GUID-based DNS name of s
    #
    # That would also be correct if transportAddressAttribute
    # were "mailAddress" because (naDsa) can also correctly
    # hold the SMTP ISM service address.
    nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())

    # We're not currently supporting SMTP replication
    # so is_smtp_replication_available() is currently
    # always returning False
    if (same_site or
        cn_conn.transport_dnstr is None or
        cn_conn.transport_dnstr.find("CN=IP") == 0 or
        not is_smtp_replication_available()):

        if (t_repsFrom.replica_flags &
            drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0:
            t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP

        null_guid = misc.GUID()
        if (t_repsFrom.transport_guid is None or
            t_repsFrom.transport_guid != null_guid):
            t_repsFrom.transport_guid = null_guid

        # See (NOTE MS-TECH INCORRECT) above
        if t_repsFrom.version == 0x1:
            if t_repsFrom.dns_name1 is None or \
               t_repsFrom.dns_name1 != nastr:
                t_repsFrom.dns_name1 = nastr
        else:
            if t_repsFrom.dns_name1 is None or \
               t_repsFrom.dns_name2 is None or \
               t_repsFrom.dns_name1 != nastr or \
               t_repsFrom.dns_name2 != nastr:
                t_repsFrom.dns_name1 = nastr
                t_repsFrom.dns_name2 = nastr
    else:
        if (t_repsFrom.replica_flags &
            drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0:
            t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP

        # We have a transport type but its not an
        # object in the database.
        # BUGFIX: the table is keyed by str(guid); the original
        # membership test used the raw GUID and so never matched.
        if str(cn_conn.transport_guid) not in self.transport_table.keys():
            raise Exception("Missing inter-site transport - (%s)" %
                            cn_conn.transport_dnstr)

        x_transport = self.transport_table[str(cn_conn.transport_guid)]

        if t_repsFrom.transport_guid != x_transport.guid:
            t_repsFrom.transport_guid = x_transport.guid

        # See (NOTE MS-TECH INCORRECT) above
        if x_transport.address_attr == "dNSHostName":

            if t_repsFrom.version == 0x1:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name1 != nastr:
                    t_repsFrom.dns_name1 = nastr
            else:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name2 is None or \
                   t_repsFrom.dns_name1 != nastr or \
                   t_repsFrom.dns_name2 != nastr:
                    t_repsFrom.dns_name1 = nastr
                    t_repsFrom.dns_name2 = nastr
        else:
            # MS tech specification says we retrieve the named
            # attribute in "transportAddressAttribute" from the parent
            # of the DSA object
            pdnstr = s_dsa.get_parent_dnstr()
            attrs = [x_transport.address_attr]

            try:
                res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
                                        attrs=attrs)
            except ldb.LdbError as e:
                (enum, estr) = e.args
                raise Exception(
                    "Unable to find attr (%s) for (%s) - (%s)" %
                    (x_transport.address_attr, pdnstr, estr))

            msg = res[0]
            nastr = str(msg[x_transport.address_attr][0])

            # See (NOTE MS-TECH INCORRECT) above
            if t_repsFrom.version == 0x1:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name1 != nastr:
                    t_repsFrom.dns_name1 = nastr
            else:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name2 is None or \
                   t_repsFrom.dns_name1 != nastr or \
                   t_repsFrom.dns_name2 != nastr:
                    t_repsFrom.dns_name1 = nastr
                    t_repsFrom.dns_name2 = nastr

    if t_repsFrom.is_modified():
        logger.debug("modify_repsFrom(): %s" % t_repsFrom)
def is_repsFrom_implied(self, n_rep, cn_conn):
    """Given a NC replica and NTDS Connection, determine if the connection
    implies a repsFrom tuple should be present from the source DSA listed
    in the connection to the naming context.

    :param n_rep: NC replica
    :param cn_conn: NTDS Connection
    ::returns (True || False), source DSA:
    """
    # NTDS Connection must satisfy all the following criteria
    # to imply a repsFrom tuple is needed:
    #
    #    cn!enabledConnection = true.
    #    cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
    #    cn!fromServer references an nTDSDSA object.
    s_dsa = None

    if cn_conn.is_enabled() and not cn_conn.is_rodc_topology():
        s_dnstr = cn_conn.get_from_dnstr()
        if s_dnstr is not None:
            s_dsa = self.get_dsa(s_dnstr)

    # No DSA matching this source DN string?
    if s_dsa is None:
        return False, None

    # To imply a repsFrom tuple is needed, each of these
    # criteria must be met:
    #
    #    An NC replica of the NC "is present" on the DC to
    #    which the nTDSDSA object referenced by cn!fromServer
    #    corresponds.
    #
    #    An NC replica of the NC "should be present" on
    #    the local DC.
    s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

    if s_rep is None or not s_rep.is_present():
        return False, None

    # To imply a repsFrom tuple is needed, each of these
    # criteria must be met:
    #
    #    The NC replica on the DC referenced by cn!fromServer is
    #    a writable replica or the NC replica that "should be
    #    present" on the local DC is a partial replica.
    #
    #    The NC is not a domain NC, the NC replica that
    #    "should be present" on the local DC is a partial
    #    replica, cn!transportType has no value, or
    #    cn!transportType has an RDN of CN=IP.
    implied = (not s_rep.is_ro() or n_rep.is_partial()) and \
              (not n_rep.is_domain() or
               n_rep.is_partial() or
               cn_conn.transport_dnstr is None or
               cn_conn.transport_dnstr.find("CN=IP") == 0)

    if implied:
        return True, s_dsa
    return False, None
def translate_ntdsconn(self):
    """This function adjusts values of repsFrom abstract attributes of NC
    replicas on the local DC to match those implied by
    nTDSConnection objects.
    """
    logger.debug("translate_ntdsconn(): enter")

    if self.my_dsa.is_translate_ntdsconn_disabled():
        return

    current_rep_table, needed_rep_table = self.my_dsa.get_rep_tables()

    # Filled in with replicas we currently have that need deleting
    delete_rep_table = {}

    # We're using the MS notation names here to allow
    # correlation back to the published algorithm.
    #
    # n_rep      - NC replica (n)
    # t_repsFrom - tuple (t) in n!repsFrom
    # s_dsa      - Source DSA of the replica. Defined as nTDSDSA
    #              object (s) such that (s!objectGUID = t.uuidDsa)
    #              In our IDL representation of repsFrom the (uuidDsa)
    #              attribute is called (source_dsa_obj_guid)
    # cn_conn    - (cn) is nTDSConnection object and child of the local
    #              DC's nTDSDSA object and (cn!fromServer = s)
    # s_rep      - source DSA replica of n

    # If we have the replica and its not needed
    # then we add it to the "to be deleted" list.
    for dnstr, n_rep in current_rep_table.items():
        if dnstr not in needed_rep_table.keys():
            delete_rep_table[dnstr] = n_rep

    # Now perform the scan of replicas we'll need
    # and compare any current repsFrom against the
    # connections
    for dnstr, n_rep in needed_rep_table.items():

        # load any repsFrom and fsmo roles as we'll
        # need them during connection translation
        n_rep.load_repsFrom(self.samdb)
        n_rep.load_fsmo_roles(self.samdb)

        # Loop thru the existing repsFrom tupples (if any)
        for i, t_repsFrom in enumerate(n_rep.rep_repsFrom):

            # for each tuple t in n!repsFrom, let s be the nTDSDSA
            # object such that s!objectGUID = t.uuidDsa
            guidstr = str(t_repsFrom.source_dsa_obj_guid)
            s_dsa = self.get_dsa_by_guidstr(guidstr)

            # Source dsa is gone from config (strange)
            # so cleanup stale repsFrom for unlisted DSA
            if s_dsa is None:
                logger.debug("repsFrom source DSA guid (%s) not found" %
                             guidstr)
                t_repsFrom.to_be_deleted = True
                continue

            s_dnstr = s_dsa.dsa_dnstr

            # Retrieve my DSAs connection object (if it exists)
            # that specifies the fromServer equivalent to
            # the DSA that is specified in the repsFrom source
            cn_conn = self.my_dsa.get_connection_by_from_dnstr(s_dnstr)

            # Let (cn) be the nTDSConnection object such that (cn)
            # is a child of the local DC's nTDSDSA object and
            # (cn!fromServer = s) and (cn!options) does not contain
            # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.
            if cn_conn and cn_conn.is_rodc_topology():
                cn_conn = None

            # KCC removes this repsFrom tuple if any of the following
            # is true:
            #     No NC replica of the NC "is present" on DSA that
            #     would be source of replica
            #
            #     A writable replica of the NC "should be present" on
            #     the local DC, but a partial replica "is present" on
            #     the source DSA
            s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

            if cn_conn is None or \
               s_rep is None or not s_rep.is_present() or \
               (not n_rep.is_ro() and s_rep.is_partial()):

                t_repsFrom.to_be_deleted = True
                continue

            # If the KCC did not remove t from n!repsFrom, it updates t
            self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

        # Loop thru connections and add implied repsFrom tuples
        # for each NTDSConnection under our local DSA if the
        # repsFrom is not already present
        for cn_dnstr, cn_conn in self.my_dsa.connect_table.items():

            implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn)
            if not implied:
                continue

            # Loop thru the existing repsFrom tupples (if any) and
            # if we already have a tuple for this connection then
            # no need to proceed to add. It will have been changed
            # to have the correct attributes above
            for i, t_repsFrom in enumerate(n_rep.rep_repsFrom):

                guidstr = str(t_repsFrom.source_dsa_obj_guid)
                if s_dsa is self.get_dsa_by_guidstr(guidstr):
                    s_dsa = None
                    break

            if s_dsa is None:
                continue

            # Create a new RepsFromTo and proceed to modify
            # it according to specification
            t_repsFrom = RepsFromTo(n_rep.nc_dnstr)

            t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid

            s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

            self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

            # Add to our NC repsFrom as this is newly computed
            if t_repsFrom.is_modified():
                n_rep.rep_repsFrom.append(t_repsFrom)

        if opts.readonly:
            # Display any to be deleted or modified repsFrom
            text = n_rep.dumpstr_to_be_deleted()
            if text:
                logger.info("TO BE DELETED:\n%s" % text)
            text = n_rep.dumpstr_to_be_modified()
            if text:
                logger.info("TO BE MODIFIED:\n%s" % text)

            # Peform deletion from our tables but perform
            # no database modification
            n_rep.commit_repsFrom(self.samdb, ro=True)
        else:
            # Commit any modified repsFrom to the NC replica
            n_rep.commit_repsFrom(self.samdb)
def keep_connection(self, cn_conn):
    """Determines if the connection is meant to be kept during the
    pruning of unneeded connections operation.

    Consults the keep_connection_list[] which was built during
    intersite NC replica graph computation.

    ::returns (True or False): if (True) connection should not be pruned
    """
    if cn_conn in self.keep_connection_list:
        return True
    return False
def merge_failed_links(self):
    """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.

    The KCC on a writable DC attempts to merge the link and connection
    failure information from bridgehead DCs in its own site to help it
    identify failed bridgehead DCs.
    """
    # MS-TECH Ref 6.2.2.3.2 Merge of kCCFailedLinks and kCCFailedLinks
    # from Bridgeheads:
    #
    # 1. Queries every bridgehead server in your site (other than yourself)
    # 2. For every ntDSConnection that references a server in a different
    #    site merge all the failure info
    #
    # XXX - not implemented yet
    pass
def setup_graph(self, part):
    """Set up a GRAPH, populated with a VERTEX for each site
    object, a MULTIEDGE for each siteLink object, and a
    MUTLIEDGESET for each siteLinkBridge object (or implied
    siteLinkBridge).

    :param part: partition the graph is being built for
    ::returns: a new graph
    """
    guid_to_vertex = {}
    # Create graph
    g = IntersiteGraph()
    # Add vertices
    for site_guid, site in self.site_table.items():
        vertex = Vertex(site, part)
        vertex.guid = site_guid
        g.vertices.add(vertex)

        if not guid_to_vertex.get(site_guid):
            guid_to_vertex[site_guid] = []

        guid_to_vertex[site_guid].append(vertex)

    connected_vertices = set()
    for transport_guid, transport in self.transport_table.items():
        # Currently only ever "IP"
        for site_link_dn, site_link in self.sitelink_table.items():
            new_edge = create_edge(transport_guid, site_link, guid_to_vertex)
            connected_vertices.update(new_edge.vertices)
            g.edges.add(new_edge)

        # If 'Bridge all site links' is enabled and Win2k3 bridges
        # required is not set
        # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
        # No documentation for this however, ntdsapi.h appears to
        # have listed:
        # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
        if ((self.my_site.site_options & 0x00000002) == 0
            and (self.my_site.site_options & 0x00001000) == 0):
            g.edge_set.add(create_auto_edge_set(g, transport_guid))
        else:
            # TODO get all site link bridges
            for site_link_bridge in []:
                g.edge_set.add(create_edge_set(g, transport_guid,
                                               site_link_bridge))

    g.connected_vertices = connected_vertices

    return g
def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
    """Get a bridghead DC.

    :param site: site object representing for which a bridgehead
        DC is desired.
    :param part: crossRef for NC to replicate.
    :param transport: interSiteTransport object for replication
        traffic.
    :param partial_ok: True if a DC containing a partial
        replica or a full replica will suffice, False if only
        a full replica will suffice.
    :param detect_failed: True to detect failed DCs and route
        replication traffic around them, False to assume no DC
        has failed.
    ::returns: dsa object for the bridgehead DC or None
    """
    bhs = self.get_all_bridgeheads(site, part, transport,
                                   partial_ok, detect_failed)
    if len(bhs) == 0:
        logger.debug("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" %
                     site.site_dnstr)
        return None
    else:
        logger.debug("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" %
                     (site.site_dnstr, bhs[0].dsa_dnstr))
        return bhs[0]
1075 def get_all_bridgeheads(self
, site
, part
, transport
,
1076 partial_ok
, detect_failed
):
1077 """Get all bridghead DCs satisfying the given criteria
1079 :param site: site object representing the site for which
1080 bridgehead DCs are desired.
1081 :param part: partition for NC to replicate.
1082 :param transport: interSiteTransport object for
1083 replication traffic.
1084 :param partial_ok: True if a DC containing a partial
1085 replica or a full replica will suffice, False if
1086 only a full replica will suffice.
1087 :param detect_ok: True to detect failed DCs and route
1088 replication traffic around them, FALSE to assume
1090 ::returns: list of dsa object for available bridgehead
1096 logger
.debug("get_all_bridgeheads: %s" % transport
)
1098 for key
, dsa
in site
.dsa_table
.items():
1100 pdnstr
= dsa
.get_parent_dnstr()
1102 # IF t!bridgeheadServerListBL has one or more values and
1103 # t!bridgeheadServerListBL does not contain a reference
1104 # to the parent object of dc then skip dc
1105 if (len(transport
.bridgehead_list
) != 0 and
1106 pdnstr
not in transport
.bridgehead_list
):
1109 # IF dc is in the same site as the local DC
1110 # IF a replica of cr!nCName is not in the set of NC replicas
1111 # that "should be present" on dc or a partial replica of the
1112 # NC "should be present" but partialReplicasOkay = FALSE
1114 if self
.my_site
.same_site(dsa
):
1115 needed
, ro
, partial
= part
.should_be_present(dsa
)
1116 if not needed
or (partial
and not partial_ok
):
1120 # IF an NC replica of cr!nCName is not in the set of NC
1121 # replicas that "are present" on dc or a partial replica of
1122 # the NC "is present" but partialReplicasOkay = FALSE
1125 rep
= dsa
.get_current_replica(part
.nc_dnstr
)
1126 if rep
is None or (rep
.is_partial() and not partial_ok
):
1129 # IF AmIRODC() and cr!nCName corresponds to default NC then
1130 # Let dsaobj be the nTDSDSA object of the dc
1131 # IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
1133 if self
.my_dsa
.is_ro() and part
.is_default():
1134 if not dsa
.is_minimum_behavior(dsdb
.DS_DOMAIN_FUNCTION_2008
):
1137 # IF t!name != "IP" and the parent object of dc has no value for
1138 # the attribute specified by t!transportAddressAttribute
1140 if transport
.name
!= "IP":
1141 # MS tech specification says we retrieve the named
1142 # attribute in "transportAddressAttribute" from the parent
1145 attrs
= [ transport
.address_attr
]
1147 res
= self
.samdb
.search(base
=pdnstr
, scope
=ldb
.SCOPE_BASE
,
1149 except ldb
.LdbError
, (enum
, estr
):
1153 if transport
.address_attr
not in msg
:
1156 nastr
= str(msg
[transport
.address_attr
][0])
1158 # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
1160 if self
.is_bridgehead_failed(dsa
, detect_failed
):
1163 logger
.debug("get_all_bridgeheads: dsadn=%s" % dsa
.dsa_dnstr
)
1166 # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
1168 # SORT bhs such that all GC servers precede DCs that are not GC
1169 # servers, and otherwise by ascending objectGUID
1171 # SORT bhs in a random order
1172 if site
.is_random_bridgehead_disabled():
1173 bhs
.sort(sort_dsa_by_gc_and_guid
)
1180 def is_bridgehead_failed(self
, dsa
, detect_failed
):
1181 """Determine whether a given DC is known to be in a failed state
1182 ::returns: True if and only if the DC should be considered failed
1184 # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
1185 # When DETECT_STALE_DISABLED, we can never know of if it's in a failed state
1186 if self
.my_site
.site_options
& 0x00000008:
1188 elif self
.is_stale_link_connection(dsa
):
1191 return detect_failed
1193 def create_connection(self
, part
, rbh
, rsite
, transport
,
1194 lbh
, lsite
, link_opt
, link_sched
,
1195 partial_ok
, detect_failed
):
1196 """Create an nTDSConnection object with the given parameters
1197 if one does not already exist.
1199 :param part: crossRef object for the NC to replicate.
1200 :param rbh: nTDSDSA object for DC to act as the
1201 IDL_DRSGetNCChanges server (which is in a site other
1202 than the local DC's site).
1203 :param rsite: site of the rbh
1204 :param transport: interSiteTransport object for the transport
1205 to use for replication traffic.
1206 :param lbh: nTDSDSA object for DC to act as the
1207 IDL_DRSGetNCChanges client (which is in the local DC's site).
1208 :param lsite: site of the lbh
1209 :param link_opt: Replication parameters (aggregated siteLink options, etc.)
1210 :param link_sched: Schedule specifying the times at which
1211 to begin replicating.
1212 :partial_ok: True if bridgehead DCs containing partial
1213 replicas of the NC are acceptable.
1214 :param detect_failed: True to detect failed DCs and route
1215 replication traffic around them, FALSE to assume no DC
1218 rbhs_all
= self
.get_all_bridgeheads(rsite
, part
, transport
,
1221 # MS-TECH says to compute rbhs_avail but then doesn't use it
1222 # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
1223 # partial_ok, detect_failed)
1225 lbhs_all
= self
.get_all_bridgeheads(lsite
, part
, transport
,
1228 # MS-TECH says to compute lbhs_avail but then doesn't use it
1229 # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
1230 # partial_ok, detect_failed)
1232 # FOR each nTDSConnection object cn such that the parent of cn is
1233 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1234 for ldsa
in lbhs_all
:
1235 for cn
in ldsa
.connect_table
.values():
1238 for rdsa
in rbhs_all
:
1239 if cn
.from_dnstr
== rdsa
.dsa_dnstr
:
1245 # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
1246 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
1247 # cn!transportType references t
1248 if (cn
.is_generated() and not cn
.is_rodc_topology() and
1249 cn
.transport_guid
== transport
.guid
):
1251 # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
1252 # cn!options and cn!schedule != sch
1253 # Perform an originating update to set cn!schedule to
1255 if (not cn
.is_user_owned_schedule() and
1256 not cn
.is_equivalent_schedule(link_sched
)):
1257 cn
.schedule
= link_sched
1258 cn
.set_modified(True)
1260 # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1261 # NTDSCONN_OPT_USE_NOTIFY are set in cn
1262 if cn
.is_override_notify_default() and \
1265 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
1267 # Perform an originating update to clear bits
1268 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1269 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1270 if (link_opt
& dsdb
.NTDSSITELINK_OPT_USE_NOTIFY
) == 0:
1272 ~
(dsdb
.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1273 dsdb
.NTDSCONN_OPT_USE_NOTIFY
)
1274 cn
.set_modified(True)
1279 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
1281 # Perform an originating update to set bits
1282 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1283 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1284 if (link_opt
& dsdb
.NTDSSITELINK_OPT_USE_NOTIFY
) != 0:
1286 (dsdb
.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1287 dsdb
.NTDSCONN_OPT_USE_NOTIFY
)
1288 cn
.set_modified(True)
1291 # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
1292 if cn
.is_twoway_sync():
1294 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
1296 # Perform an originating update to clear bit
1297 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1298 if (link_opt
& dsdb
.NTDSSITELINK_OPT_TWOWAY_SYNC
) == 0:
1299 cn
.options
&= ~dsdb
.NTDSCONN_OPT_TWOWAY_SYNC
1300 cn
.set_modified(True)
1305 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
1307 # Perform an originating update to set bit
1308 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1309 if (link_opt
& dsdb
.NTDSSITELINK_OPT_TWOWAY_SYNC
) != 0:
1310 cn
.options |
= dsdb
.NTDSCONN_OPT_TWOWAY_SYNC
1311 cn
.set_modified(True)
1314 # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
1316 if cn
.is_intersite_compression_disabled():
1318 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
1320 # Perform an originating update to clear bit
1321 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1324 dsdb
.NTDSSITELINK_OPT_DISABLE_COMPRESSION
) == 0:
1326 ~dsdb
.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1327 cn
.set_modified(True)
1331 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1333 # Perform an originating update to set bit
1334 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1337 dsdb
.NTDSSITELINK_OPT_DISABLE_COMPRESSION
) != 0:
1339 dsdb
.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1340 cn
.set_modified(True)
1342 # Display any modified connection
1344 if cn
.to_be_modified
:
1345 logger
.info("TO BE MODIFIED:\n%s" % cn
)
1347 ldsa
.commit_connections(self
.samdb
, ro
=True)
1349 ldsa
.commit_connections(self
.samdb
)
1352 valid_connections
= 0
1354 # FOR each nTDSConnection object cn such that cn!parent is
1355 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1356 for ldsa
in lbhs_all
:
1357 for cn
in ldsa
.connect_table
.values():
1360 for rdsa
in rbhs_all
:
1361 if cn
.from_dnstr
== rdsa
.dsa_dnstr
:
1367 # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
1368 # cn!transportType references t) and
1369 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
1370 if ((not cn
.is_generated() or
1371 cn
.transport_guid
== transport
.guid
) and
1372 not cn
.is_rodc_topology()):
1374 # LET rguid be the objectGUID of the nTDSDSA object
1375 # referenced by cn!fromServer
1376 # LET lguid be (cn!parent)!objectGUID
1378 # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
1379 # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
1380 # Increment cValidConnections by 1
1381 if (not self
.is_bridgehead_failed(rdsa
, detect_failed
) and
1382 not self
.is_bridgehead_failed(ldsa
, detect_failed
)):
1383 valid_connections
+= 1
1385 # IF keepConnections does not contain cn!objectGUID
1386 # APPEND cn!objectGUID to keepConnections
1387 if not self
.keep_connection(cn
):
1388 self
.keep_connection_list
.append(cn
)
1392 # IF cValidConnections = 0
1393 if valid_connections
== 0:
1395 # LET opt be NTDSCONN_OPT_IS_GENERATED
1396 opt
= dsdb
.NTDSCONN_OPT_IS_GENERATED
1398 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
1399 # SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1400 # NTDSCONN_OPT_USE_NOTIFY in opt
1401 if (link_opt
& dsdb
.NTDSSITELINK_OPT_USE_NOTIFY
) != 0:
1402 opt |
= (dsdb
.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1403 dsdb
.NTDSCONN_OPT_USE_NOTIFY
)
1405 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
1406 # SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
1407 if (link_opt
& dsdb
.NTDSSITELINK_OPT_TWOWAY_SYNC
) != 0:
1408 opt |
= dsdb
.NTDSCONN_OPT_TWOWAY_SYNC
1410 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1412 # SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
1414 dsdb
.NTDSSITELINK_OPT_DISABLE_COMPRESSION
) != 0:
1415 opt |
= dsdb
.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1417 # Perform an originating update to create a new nTDSConnection
1418 # object cn that is a child of lbh, cn!enabledConnection = TRUE,
1419 # cn!options = opt, cn!transportType is a reference to t,
1420 # cn!fromServer is a reference to rbh, and cn!schedule = sch
1421 cn
= lbh
.new_connection(opt
, 0, transport
, lbh
.dsa_dnstr
, link_sched
)
1423 # Display any added connection
1426 logger
.info("TO BE ADDED:\n%s" % cn
)
1428 lbh
.commit_connections(self
.samdb
, ro
=True)
1430 lbh
.commit_connections(self
.samdb
)
1432 # APPEND cn!objectGUID to keepConnections
1433 if not self
.keep_connection(cn
):
1434 self
.keep_connection_list
.append(cn
)
1436 def add_transports(self
, vertex
, local_vertex
, graph
, detect_failed
):
1437 vertex
.accept_red_red
= []
1438 vertex
.accept_black
= []
1439 found_failed
= False
1440 for t_guid
, transport
in self
.transport_table
.items():
1441 if transport
.name
!= 'IP':
1443 logger
.debug(t_guid
)
1444 # FLAG_CR_NTDS_DOMAIN 0x00000002
1445 if (vertex
.is_red() and transport
.name
!= "IP" and
1446 vertex
.part
.system_flags
& 0x00000002):
1449 if vertex
not in graph
.connected_vertices
:
1452 partial_replica_okay
= vertex
.is_black()
1455 # bh = self.get_bridgehead(local_vertex.site, vertex.part, transport,
1456 bh
= self
.get_bridgehead(vertex
.site
, vertex
.part
, transport
,
1457 partial_replica_okay
, detect_failed
)
1462 vertex
.accept_red_red
.append(t_guid
)
1463 vertex
.accept_black
.append(t_guid
)
1465 # Add additional transport to allow another run of Dijkstra
1466 vertex
.accept_red_red
.append("EDGE_TYPE_ALL")
1467 vertex
.accept_black
.append("EDGE_TYPE_ALL")
1471 def create_connections(self
, graph
, part
, detect_failed
):
1472 """Construct an NC replica graph for the NC identified by
1473 the given crossRef, then create any additional nTDSConnection
1476 :param graph: site graph.
1477 :param part: crossRef object for NC.
1478 :param detect_failed: True to detect failed DCs and route
1479 replication traffic around them, False to assume no DC
1482 Modifies self.keep_connection_list by adding any connections
1483 deemed to be "in use".
1485 ::returns: (all_connected, found_failed_dc)
1486 (all_connected) True if the resulting NC replica graph
1487 connects all sites that need to be connected.
1488 (found_failed_dc) True if one or more failed DCs were
1491 all_connected
= True
1492 found_failed
= False
1494 logger
.debug("create_connections(): enter\n\tpartdn=%s\n\tdetect_failed=%s" %
1495 (part
.nc_dnstr
, detect_failed
))
1497 # XXX - This is a highly abbreviated function from the MS-TECH
1498 # ref. It creates connections between bridgeheads to all
1499 # sites that have appropriate replicas. Thus we are not
1500 # creating a minimum cost spanning tree but instead
1501 # producing a fully connected tree. This should produce
1502 # a full (albeit not optimal cost) replication topology.
1504 my_vertex
= Vertex(self
.my_site
, part
)
1505 my_vertex
.color_vertex()
1507 for v
in graph
.vertices
:
1509 if self
.add_transports(v
, my_vertex
, graph
, False):
1512 # No NC replicas for this NC in the site of the local DC,
1513 # so no nTDSConnection objects need be created
1514 if my_vertex
.is_white():
1515 return all_connected
, found_failed
1517 edge_list
, component_count
= self
.get_spanning_tree_edges(graph
)
1519 if component_count
> 1:
1520 all_connected
= False
1522 # LET partialReplicaOkay be TRUE if and only if
1523 # localSiteVertex.Color = COLOR.BLACK
1524 if my_vertex
.is_black():
1529 # Utilize the IP transport only for now
1531 for transport
in self
.transport_table
.values():
1532 if transport
.name
== "IP":
1535 if transport
is None:
1536 raise Exception("Unable to find inter-site transport for IP")
1539 if e
.directed
and e
.vertices
[0].site
is self
.my_site
: # more accurate comparison?
1542 if e
.vertices
[0].site
is self
.my_site
:
1543 rsite
= e
.vertices
[1].site
1545 rsite
= e
.vertices
[0].site
1547 # We don't make connections to our own site as that
1548 # is intrasite topology generator's job
1549 if rsite
is self
.my_site
:
1552 # Determine bridgehead server in remote site
1553 rbh
= self
.get_bridgehead(rsite
, part
, transport
,
1554 partial_ok
, detect_failed
)
1556 # RODC acts as an BH for itself
1558 # LET lbh be the nTDSDSA object of the local DC
1560 # LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
1561 # cr, t, partialReplicaOkay, detectFailedDCs)
1562 if self
.my_dsa
.is_ro():
1563 lsite
= self
.my_site
1566 lsite
= self
.my_site
1567 lbh
= self
.get_bridgehead(lsite
, part
, transport
,
1568 partial_ok
, detect_failed
)
1573 sitelink
= e
.site_link
1574 if sitelink
is None:
1578 link_opt
= sitelink
.options
1579 link_sched
= sitelink
.schedule
1581 self
.create_connection(part
, rbh
, rsite
, transport
,
1582 lbh
, lsite
, link_opt
, link_sched
,
1583 partial_ok
, detect_failed
)
1585 return all_connected
, found_failed
1587 def create_intersite_connections(self
):
1588 """Computes an NC replica graph for each NC replica that "should be
1589 present" on the local DC or "is present" on any DC in the same site
1590 as the local DC. For each edge directed to an NC replica on such a
1591 DC from an NC replica on a DC in another site, the KCC creates an
1592 nTDSConnection object to imply that edge if one does not already
1595 Modifies self.keep_connection_list - A list of nTDSConnection
1596 objects for edges that are directed
1597 to the local DC's site in one or more NC replica graphs.
1599 returns: True if spanning trees were created for all NC replica
1600 graphs, otherwise False.
1602 all_connected
= True
1603 self
.keep_connection_list
= []
1605 # LET crossRefList be the set containing each object o of class
1606 # crossRef such that o is a child of the CN=Partitions child of the
1609 # FOR each crossRef object cr in crossRefList
1610 # IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
1611 # is clear in cr!systemFlags, skip cr.
1612 # LET g be the GRAPH return of SetupGraph()
1614 for part
in self
.part_table
.values():
1616 if not part
.is_enabled():
1619 if part
.is_foreign():
1622 graph
= self
.setup_graph(part
)
1624 # Create nTDSConnection objects, routing replication traffic
1625 # around "failed" DCs.
1626 found_failed
= False
1628 connected
, found_failed
= self
.create_connections(graph
, part
, True)
1631 all_connected
= False
1634 # One or more failed DCs preclude use of the ideal NC
1635 # replica graph. Add connections for the ideal graph.
1636 self
.create_connections(graph
, part
, False)
1638 return all_connected
1640 def get_spanning_tree_edges(self
, graph
):
1641 # Phase 1: Run Dijkstra's to get a list of internal edges, which are
1642 # just the shortest-paths connecting colored vertices
1644 internal_edges
= set()
1646 for e_set
in graph
.edge_set
:
1648 for v
in graph
.vertices
:
1651 # All con_type in an edge set is the same
1652 for e
in e_set
.edges
:
1653 edgeType
= e
.con_type
1654 for v
in e
.vertices
:
1657 # Run dijkstra's algorithm with just the red vertices as seeds
1658 # Seed from the full replicas
1659 dijkstra(graph
, edgeType
, False)
1662 process_edge_set(graph
, e_set
, internal_edges
)
1664 # Run dijkstra's algorithm with red and black vertices as the seeds
1665 # Seed from both full and partial replicas
1666 dijkstra(graph
, edgeType
, True)
1669 process_edge_set(graph
, e_set
, internal_edges
)
1671 # All vertices have root/component as itself
1672 setup_vertices(graph
)
1673 process_edge_set(graph
, None, internal_edges
)
1675 # Phase 2: Run Kruskal's on the internal edges
1676 output_edges
, components
= kruskal(graph
, internal_edges
)
1678 # This recalculates the cost for the path connecting the closest red vertex
1679 # Ignoring types is fine because NO suboptimal edge should exist in the graph
1680 dijkstra(graph
, "EDGE_TYPE_ALL", False) # TODO rename
1681 # Phase 3: Process the output
1682 for v
in graph
.vertices
:
1686 v
.dist_to_red
= v
.repl_info
.cost
1688 # count the components
1689 return self
.copy_output_edges(graph
, output_edges
), components
1691 # This ensures only one-way connections for partial-replicas
1692 def copy_output_edges(self
, graph
, output_edges
):
1694 vid
= self
.my_site
# object guid for the local dc's site
1696 for edge
in output_edges
:
1697 # Three-way edges are no problem here since these were created by
1698 # add_out_edge which only has two endpoints
1699 v
= edge
.vertices
[0]
1700 w
= edge
.vertices
[1]
1701 if v
.site
is vid
or w
.site
is vid
:
1702 if (v
.is_black() or w
.is_black()) and not v
.dist_to_red
== MAX_DWORD
:
1703 edge
.directed
= True
1705 if w
.dist_to_red
< v
.dist_to_red
:
1706 edge
.vertices
[0] = w
1707 edge
.vertices
[1] = v
1709 edge_list
.append(edge
)
1713 def intersite(self
):
1714 """The head method for generating the inter-site KCC replica
1715 connection graph and attendant nTDSConnection objects
1718 Produces self.keep_connection_list[] of NTDS Connections
1719 that should be kept during subsequent pruning process.
1721 ::return (True or False): (True) if the produced NC replica
1722 graph connects all sites that need to be connected
1727 mysite
= self
.my_site
1728 all_connected
= True
1730 logger
.debug("intersite(): enter")
1732 # Determine who is the ISTG
1734 mysite
.select_istg(self
.samdb
, mydsa
, ro
=True)
1736 mysite
.select_istg(self
.samdb
, mydsa
, ro
=False)
1738 # Test whether local site has topology disabled
1739 if mysite
.is_intersite_topology_disabled():
1740 logger
.debug("intersite(): exit disabled all_connected=%d" %
1742 return all_connected
1744 if not mydsa
.is_istg():
1745 logger
.debug("intersite(): exit not istg all_connected=%d" %
1747 return all_connected
1749 self
.merge_failed_links()
1751 # For each NC with an NC replica that "should be present" on the
1752 # local DC or "is present" on any DC in the same site as the
1753 # local DC, the KCC constructs a site graph--a precursor to an NC
1754 # replica graph. The site connectivity for a site graph is defined
1755 # by objects of class interSiteTransport, siteLink, and
1756 # siteLinkBridge in the config NC.
1758 all_connected
= self
.create_intersite_connections()
1760 logger
.debug("intersite(): exit all_connected=%d" % all_connected
)
1761 return all_connected
1763 def update_rodc_connection(self
):
1764 """Runs when the local DC is an RODC and updates the RODC NTFRS
1767 # Given an nTDSConnection object cn1, such that cn1.options contains
1768 # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2,
1769 # does not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure
1770 # that the following is true:
1772 # cn1.fromServer = cn2.fromServer
1773 # cn1.schedule = cn2.schedule
1775 # If no such cn2 can be found, cn1 is not modified.
1776 # If no such cn1 can be found, nothing is modified by this task.
1778 if not self
.my_dsa
.is_ro():
1782 # Find cn2 - the DRS NTDSConnection
1783 for con
in self
.my_dsa
.connect_table
.values():
1784 if not con
.is_rodc_topology():
1788 # Find cn1 - the FRS NTDSConnection
1790 for con
in self
.my_dsa
.connect_table
.values():
1791 if con
.is_rodc_topology():
1792 con
.from_dnstr
= cn2
.from_dnstr
1793 con
.schedule
= cn2
.schedule
1794 con
.to_be_modified
= True
1796 # Commit changes to the database
1797 self
.my_dsa
.commit_connections(self
.samdb
, ro
=opts
.readonly
)
1799 def intrasite_max_node_edges(self
, node_count
):
1800 """Returns the maximum number of edges directed to a node in
1801 the intrasite replica graph.
1803 The KCC does not create more
1804 than 50 edges directed to a single DC. To optimize replication,
1805 we compute that each node should have n+2 total edges directed
1806 to it such that (n) is the smallest non-negative integer
1807 satisfying (node_count <= 2*(n*n) + 6*n + 7)
1809 :param node_count: total number of nodes in the replica graph
1813 if node_count
<= (2 * (n
* n
) + (6 * n
) + 7):
1821 def construct_intrasite_graph(self
, site_local
, dc_local
,
1822 nc_x
, gc_only
, detect_stale
):
1824 # We're using the MS notation names here to allow
1825 # correlation back to the published algorithm.
1827 # nc_x - naming context (x) that we are testing if it
1828 # "should be present" on the local DC
1829 # f_of_x - replica (f) found on a DC (s) for NC (x)
1830 # dc_s - DC where f_of_x replica was found
1831 # dc_local - local DC that potentially needs a replica
1833 # r_list - replica list R
1834 # p_of_x - replica (p) is partial and found on a DC (s)
1836 # l_of_x - replica (l) is the local replica for NC (x)
1837 # that should appear on the local DC
1838 # r_len = is length of replica list |R|
1840 # If the DSA doesn't need a replica for this
1841 # partition (NC x) then continue
1842 needed
, ro
, partial
= nc_x
.should_be_present(dc_local
)
1844 logger
.debug("construct_intrasite_graph(): enter" +
1845 "\n\tgc_only=%d" % gc_only
+
1846 "\n\tdetect_stale=%d" % detect_stale
+
1847 "\n\tneeded=%s" % needed
+
1849 "\n\tpartial=%s" % partial
+
1855 # Create a NCReplica that matches what the local replica
1856 # should say. We'll use this below in our r_list
1857 l_of_x
= NCReplica(dc_local
.dsa_dnstr
, dc_local
.dsa_guid
,
1860 l_of_x
.identify_by_basedn(self
.samdb
)
1862 l_of_x
.rep_partial
= partial
1865 # Add this replica that "should be present" to the
1866 # needed replica table for this DSA
1867 dc_local
.add_needed_replica(l_of_x
)
1869 # Empty replica sequence list
1872 # We'll loop thru all the DSAs looking for
1873 # writeable NC replicas that match the naming
1874 # context dn for (nc_x)
1876 for dc_s_dn
, dc_s
in self
.my_site
.dsa_table
.items():
1878 # If this partition (nc_x) doesn't appear as a
1879 # replica (f_of_x) on (dc_s) then continue
1880 if not nc_x
.nc_dnstr
in dc_s
.current_rep_table
.keys():
1883 # Pull out the NCReplica (f) of (x) with the dn
1884 # that matches NC (x) we are examining.
1885 f_of_x
= dc_s
.current_rep_table
[nc_x
.nc_dnstr
]
1887 # Replica (f) of NC (x) must be writable
1891 # Replica (f) of NC (x) must satisfy the
1892 # "is present" criteria for DC (s) that
1894 if not f_of_x
.is_present():
1897 # DC (s) must be a writable DSA other than
1898 # my local DC. In other words we'd only replicate
1899 # from other writable DC
1900 if dc_s
.is_ro() or dc_s
is dc_local
:
1903 # Certain replica graphs are produced only
1904 # for global catalogs, so test against
1905 # method input parameter
1906 if gc_only
and not dc_s
.is_gc():
1909 # DC (s) must be in the same site as the local DC
1910 # as this is the intra-site algorithm. This is
1911 # handled by virtue of placing DSAs in per
1912 # site objects (see enclosing for() loop)
1914 # If NC (x) is intended to be read-only full replica
1915 # for a domain NC on the target DC then the source
1916 # DC should have functional level at minimum WIN2008
1918 # Effectively we're saying that in order to replicate
1919 # to a targeted RODC (which was introduced in Windows 2008)
1920 # then we have to replicate from a DC that is also minimally
1923 # You can also see this requirement in the MS special
1924 # considerations for RODC which state that to deploy
1925 # an RODC, at least one writable domain controller in
1926 # the domain must be running Windows Server 2008
1927 if ro
and not partial
and nc_x
.nc_type
== NCType
.domain
:
1928 if not dc_s
.is_minimum_behavior(dsdb
.DS_DOMAIN_FUNCTION_2008
):
1931 # If we haven't been told to turn off stale connection
1932 # detection and this dsa has a stale connection then
1934 if detect_stale
and self
.is_stale_link_connection(dc_s
):
1937 # Replica meets criteria. Add it to table indexed
1938 # by the GUID of the DC that it appears on
1939 r_list
.append(f_of_x
)
1941 # If a partial (not full) replica of NC (x) "should be present"
1942 # on the local DC, append to R each partial replica (p of x)
1943 # such that p "is present" on a DC satisfying the same
1944 # criteria defined above for full replica DCs.
1947 # Now we loop thru all the DSAs looking for
1948 # partial NC replicas that match the naming
1949 # context dn for (NC x)
1950 for dc_s_dn
, dc_s
in self
.my_site
.dsa_table
.items():
1952 # If this partition NC (x) doesn't appear as a
1953 # replica (p) of NC (x) on the dsa DC (s) then
1955 if not nc_x
.nc_dnstr
in dc_s
.current_rep_table
.keys():
1958 # Pull out the NCReplica with the dn that
1959 # matches NC (x) we are examining.
1960 p_of_x
= dc_s
.current_rep_table
[nc_x
.nc_dnstr
]
1962 # Replica (p) of NC (x) must be partial
1963 if not p_of_x
.is_partial():
1966 # Replica (p) of NC (x) must satisfy the
1967 # "is present" criteria for DC (s) that
1969 if not p_of_x
.is_present():
1972 # DC (s) must be a writable DSA other than
1973 # my DSA. In other words we'd only replicate
1974 # from other writable DSA
1975 if dc_s
.is_ro() or dc_s
is dc_local
:
1978 # Certain replica graphs are produced only
1979 # for global catalogs, so test against
1980 # method input parameter
1981 if gc_only
and not dc_s
.is_gc():
1984 # DC (s) must be in the same site as the local DC
1985 # as this is the intra-site algorithm. This is
1986 # handled by virtue of placing DSAs in per
1987 # site objects (see enclosing for() loop)
1989 # This criteria is moot (a no-op) for this case
1990 # because we are scanning for (partial = True). The
1991 # MS algorithm statement says partial replica scans
1992 # should adhere to the "same" criteria as full replica
1993 # scans so the criteria doesn't change here...its just
1994 # rendered pointless.
1996 # The case that is occurring would be a partial domain
1997 # replica is needed on a local DC global catalog. There
1998 # is no minimum windows behavior for those since GCs
1999 # have always been present.
2000 if ro
and not partial
and nc_x
.nc_type
== NCType
.domain
:
2001 if not dc_s
.is_minimum_behavior(dsdb
.DS_DOMAIN_FUNCTION_2008
):
2004 # If we haven't been told to turn off stale connection
2005 # detection and this dsa has a stale connection then
2007 if detect_stale
and self
.is_stale_link_connection(dc_s
):
2010 # Replica meets criteria. Add it to table indexed
2011 # by the GUID of the DSA that it appears on
2012 r_list
.append(p_of_x
)
2014 # Append to R the NC replica that "should be present"
2016 r_list
.append(l_of_x
)
2018 r_list
.sort(sort_replica_by_dsa_guid
)
2022 max_node_edges
= self
.intrasite_max_node_edges(r_len
)
2024 # Add a node for each r_list element to the replica graph
2027 node
= GraphNode(rep
.rep_dsa_dnstr
, max_node_edges
)
2028 graph_list
.append(node
)
2030 # For each r(i) from (0 <= i < |R|-1)
2032 while i
< (r_len
-1):
2033 # Add an edge from r(i) to r(i+1) if r(i) is a full
2034 # replica or r(i+1) is a partial replica
2035 if not r_list
[i
].is_partial() or r_list
[i
+1].is_partial():
2036 graph_list
[i
+1].add_edge_from(r_list
[i
].rep_dsa_dnstr
)
2038 # Add an edge from r(i+1) to r(i) if r(i+1) is a full
2039 # replica or ri is a partial replica.
2040 if not r_list
[i
+1].is_partial() or r_list
[i
].is_partial():
2041 graph_list
[i
].add_edge_from(r_list
[i
+1].rep_dsa_dnstr
)
2044 # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
2045 # or r0 is a partial replica.
2046 if not r_list
[r_len
-1].is_partial() or r_list
[0].is_partial():
2047 graph_list
[0].add_edge_from(r_list
[r_len
-1].rep_dsa_dnstr
)
2049 # Add an edge from r0 to r|R|-1 if r0 is a full replica or
2050 # r|R|-1 is a partial replica.
2051 if not r_list
[0].is_partial() or r_list
[r_len
-1].is_partial():
2052 graph_list
[r_len
-1].add_edge_from(r_list
[0].rep_dsa_dnstr
)
2054 # For each existing nTDSConnection object implying an edge
2055 # from rj of R to ri such that j != i, an edge from rj to ri
2056 # is not already in the graph, and the total edges directed
2057 # to ri is less than n+2, the KCC adds that edge to the graph.
2060 dsa
= self
.my_site
.dsa_table
[graph_list
[i
].dsa_dnstr
]
2061 graph_list
[i
].add_edges_from_connections(dsa
)
2066 tnode
= graph_list
[i
]
2068 # To optimize replication latency in sites with many NC replicas, the
2069 # KCC adds new edges directed to ri to bring the total edges to n+2,
2070 # where the NC replica rk of R from which the edge is directed
2071 # is chosen at random such that k != i and an edge from rk to ri
2072 # is not already in the graph.
2074 # Note that the KCC tech ref does not give a number for the definition
2075 # of "sites with many NC replicas". At a bare minimum to satisfy
2076 # n+2 edges directed at a node we have to have at least three replicas
2077 # in |R| (i.e. if n is zero then at least replicas from two other graph
2078 # nodes may direct edges to us).
2080 # pick a random index
2081 findex
= rindex
= random
.randint(0, r_len
-1)
2083 # while this node doesn't have sufficient edges
2084 while not tnode
.has_sufficient_edges():
2085 # If this edge can be successfully added (i.e. not
2086 # the same node and edge doesn't already exist) then
2087 # select a new random index for the next round
2088 if tnode
.add_edge_from(graph_list
[rindex
].dsa_dnstr
):
2089 findex
= rindex
= random
.randint(0, r_len
-1)
2091 # Otherwise continue looking against each node
2092 # after the random selection
2097 if rindex
== findex
:
2098 logger
.error("Unable to satisfy max edge criteria!")
2101 # Print the graph node in debug mode
2102 logger
.debug("%s" % tnode
)
2104 # For each edge directed to the local DC, ensure a nTDSConnection
2105 # points to us that satisfies the KCC criteria
2106 if graph_list
[i
].dsa_dnstr
== dc_local
.dsa_dnstr
:
2107 graph_list
[i
].add_connections_from_edges(dc_local
)
def intrasite(self):
    """Generate the intra-site KCC replica connection graph.

    The head method for generating the intra-site KCC replica
    connection graph and attendant nTDSConnection objects.

    NOTE(review): several statements were elided when this chunk was
    extracted -- among them the binding of ``mydsa`` (used below,
    presumably the local DSA), the body of the disabled-topology
    check, and the trailing ``detect_stale`` argument of the first
    two construct_intrasite_graph() calls.  Elisions marked inline.
    """
    logger.debug("intrasite(): enter")

    # Test whether local site has topology disabled
    mysite = self.site_table[str(self.my_site_guid)]
    if mysite.is_intrasite_topology_disabled():
        # [body elided in extraction -- presumably an early return]

    detect_stale = (not mysite.is_detect_stale_disabled())

    # Loop thru all the partitions.
    for partdn, part in self.part_table.items():
        self.construct_intrasite_graph(mysite, mydsa, part, False,
            # [final (detect_stale) argument elided in extraction]

    # If the DC is a GC server, the KCC constructs an additional NC
    # replica graph (and creates nTDSConnection objects) for the
    # config NC as above, except that only NC replicas that "are present"
    # on GC servers are added to R.
    for partdn, part in self.part_table.items():
        if part.is_config():
            self.construct_intrasite_graph(mysite, mydsa, part, True,
                # [final (detect_stale) argument elided in extraction]

    # The DC repeats the NC replica graph computation and nTDSConnection
    # creation for each of the NC replica graphs, this time assuming
    # that no DC has failed. It does so by re-executing the steps as
    # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
    # set in the options attribute of the site settings object for
    # the local DC's site. (ie. we set "detec_stale" flag to False)

    # Loop thru all the partitions.
    for partdn, part in self.part_table.items():
        self.construct_intrasite_graph(mysite, mydsa, part, False,
                                       False)  # don't detect stale

    # If the DC is a GC server, the KCC constructs an additional NC
    # replica graph (and creates nTDSConnection objects) for the
    # config NC as above, except that only NC replicas that "are present"
    # on GC servers are added to R.
    for partdn, part in self.part_table.items():
        if part.is_config():
            self.construct_intrasite_graph(mysite, mydsa, part, True,
                                           False)  # don't detect stale

    # Display any to be added or modified repsFrom
    for dnstr, connect in mydsa.connect_table.items():
        if connect.to_be_deleted:
            logger.info("TO BE DELETED:\n%s" % connect)
        if connect.to_be_modified:
            logger.info("TO BE MODIFIED:\n%s" % connect)
        if connect.to_be_added:
            logger.info("TO BE ADDED:\n%s" % connect)

    # NOTE(review): a guard around this dry-run commit (presumably a
    # read-only option test) appears to have been elided in extraction.
    mydsa.commit_connections(self.samdb, ro=True)

    # Commit any newly created connections to the samdb
    mydsa.commit_connections(self.samdb)
def run(self, dburl, lp, creds):
    """Perform a complete run of the KCC.

    Method to perform a complete run of the KCC and produce an
    updated topology for subsequent NC replica syncronization
    between domain controllers.

    :param dburl: LDB URL of the sam database to operate on
    :param lp: loadparm context
    :param creds: credentials used to open the database

    NOTE(review): the ``try:`` guarding the SamDB open, the error
    format arguments, the failure return, and at least one published
    KCC step between refresh and intersite (the numbering jumps)
    were elided in extraction.
    """
    # We may already have a samdb setup if we are
    # currently importing an ldif for a test run
    if self.samdb is None:
        # [try: elided in extraction]
        self.samdb = SamDB(url=dburl,
                           session_info=system_session(),
                           credentials=creds, lp=lp)
        # NOTE: Python 2 except syntax.
        except ldb.LdbError, (num, msg):
            logger.error("Unable to open sam database %s : %s" %
                # [format arguments and error return elided in extraction]

    # Pull the whole topology-relevant state into memory.
    self.load_all_sites()
    self.load_all_partitions()
    self.load_all_transports()
    self.load_all_sitelinks()

    # These are the published steps (in order) for the
    # MS-TECH description of the KCC algorithm
    self.refresh_failed_links_connections()
    # [intra-site step elided in extraction -- numbering jumps here]
    all_connected = self.intersite()
    self.remove_unneeded_ntdsconn(all_connected)
    self.translate_ntdsconn()
    self.remove_unneeded_failed_links_connections()
    self.update_rodc_connection()
def import_ldif(self, dburl, lp, creds, ldif_file):
    """Routine to import all objects and attributes that are relevent
    to the KCC algorithms from a previously exported LDIF file.

    The point of this function is to allow a programmer/debugger to
    import an LDIF file with non-security relevent information that
    was previously extracted from a DC database.  The LDIF file is used
    to create a temporary abbreviated database.  The KCC algorithm can
    then run against this abbreviated database for debug or test
    verification that the topology generated is computationally the
    same between different OSes and algorithms.

    :param dburl: path to the temporary abbreviated db to create
    :param ldif_file: path to the ldif file to import

    NOTE(review): the ``try:`` around the add_ldif, the error
    returns, and the format argument of the first error message were
    elided in extraction.
    """
    if os.path.exists(dburl):
        logger.error("Specify a database (%s) that doesn't already exist." %
            # [format argument and error return elided in extraction]

    # Use ["modules:"] as we are attempting to build a sam
    # database as opposed to start it here.
    self.samdb = Ldb(url=dburl, session_info=system_session(),
                     lp=lp, options=["modules:"])

    self.samdb.transaction_start()
    # [try: elided in extraction]
    data = read_and_sub_file(ldif_file, None)
    self.samdb.add_ldif(data, None)
    # NOTE: Python 2 except syntax.
    except Exception, estr:
        logger.error("%s" % estr)
        self.samdb.transaction_cancel()
        # [error return elided in extraction]

    self.samdb.transaction_commit()

    # We have an abbreviated list of options here because we have built
    # an abbreviated database. We use the rootdse and extended-dn
    # modules only during this re-open
    self.samdb = SamDB(url=dburl, session_info=system_session(),
                       credentials=creds, lp=lp,
                       options=["modules:rootdse,extended_dn_out_ldb"])
def export_ldif(self, dburl, lp, creds, ldif_file):
    """Routine to extract all objects and attributes that are relevent
    to the KCC algorithms from a DC database.

    The point of this function is to allow a programmer/debugger to
    extract an LDIF file with non-security relevent information from
    a DC database.  The LDIF file can then be used to "import" via
    the import_ldif() function this file into a temporary abbreviated
    database.  The KCC algorithm can then run against this abbreviated
    database for debug or test verification that the topology generated
    is computationally the same between different OSes and algorithms.

    :param dburl: LDAP database URL to extract info from
    :param ldif_file: output LDIF file name to create

    NOTE(review): extraction elided many lines here -- try headers,
    error returns, several attribute-list entries, some search
    keyword arguments, and a few enclosing ``for`` loops whose loop
    variables (``msg``, ``sstr``, ``nclist``) are used below without
    a visible binding.  Elisions are marked inline.
    """
    # [try: elided in extraction]
    self.samdb = SamDB(url=dburl,
                       session_info=system_session(),
                       credentials=creds, lp=lp)
    # NOTE: Python 2 except syntax.
    except ldb.LdbError, (enum, estr):
        logger.error("Unable to open sam database (%s) : %s" %
            # [format arguments and error return elided in extraction]

    if os.path.exists(ldif_file):
        logger.error("Specify a file (%s) that doesn't already exist." %
            # [format argument and error return elided in extraction]

    # [try: elided in extraction]
    f = open(ldif_file, "w")
    except IOError as ioerr:
        logger.error("Unable to open (%s) : %s" % (ldif_file, str(ioerr)))
        # [error return elided in extraction]

    # Query partitions (crossRef objects).
    attrs = ["objectClass",
             # [several attribute names elided in extraction]
             "msDS-NC-Replica-Locations",
             "msDS-NC-RO-Replica-Locations"]
    sstr = "CN=Partitions,%s" % self.samdb.get_config_basedn()
    res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                            # [attrs= keyword elided in extraction]
                            expression="(objectClass=crossRef)")

    # Write partitions output
    write_search_result(self.samdb, f, res)

    # Query cross reference container
    attrs = ["objectClass",
             # [several attribute names elided in extraction]
             "msDS-Behavior-Version",
             "msDS-EnabledFeature"]
    sstr = "CN=Partitions,%s" % self.samdb.get_config_basedn()
    res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                            expression="(objectClass=crossRefContainer)")

    # Write cross reference container output
    write_search_result(self.samdb, f, res)

    # Query sites.
    attrs = ["objectClass",
             # [remaining attribute names elided in extraction]
    sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
    sites = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                              expression="(objectClass=site)")

    # Write sites output
    write_search_result(self.samdb, f, sites)

    # Query NTDS Site Settings
    # [enclosing `for msg in sites:`-style loop elided in extraction;
    #  `msg` below otherwise has no visible binding]
    sitestr = str(msg.dn)
    attrs = ["objectClass",
             # [attribute names elided in extraction]
             "interSiteTopologyGenerator",
             "interSiteTopologyFailover",
             # [remaining attribute names elided in extraction]
    sstr = "CN=NTDS Site Settings,%s" % sitestr
    res = self.samdb.search(base=sstr, scope=ldb.SCOPE_BASE,
        # [remaining search arguments elided in extraction]

    # Write Site Settings output
    write_search_result(self.samdb, f, res)

    # Naming context list
    # [nclist initialisation elided in extraction]

    # Query Directory Service Agents
    # [sstr assignment elided in extraction]
    ncattrs = ["hasMasterNCs",
               "msDS-hasMasterNCs",
               "hasPartialReplicaNCs",
               "msDS-HasDomainNCs",
               "msDS-hasFullReplicaNCs",
               "msDS-HasInstantiatedNCs"]
    attrs = ["objectClass",
             # [attribute names elided in extraction]
             "msDS-Behavior-Version"]
    res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                            attrs=attrs + ncattrs,
                            expression="(objectClass=nTDSDSA)")

    # Spin thru all the DSAs looking for NC replicas
    # and build a list of all possible Naming Contexts
    # for subsequent retrieval below
    # [enclosing per-message loop elided in extraction]
    for k in msg.keys():
        # [membership test (against ncattrs) elided in extraction]
        for value in msg[k]:
            # Some of these have binary DNs so
            # use dsdb_Dn to split out relevent parts
            dsdn = dsdb_Dn(self.samdb, value)
            dnstr = str(dsdn.dn)
            if dnstr not in nclist:
                nclist.append(dnstr)

    # Write the DSA search output.
    write_search_result(self.samdb, f, res)

    # Query NTDS Connections
    # [sstr assignment elided in extraction]
    attrs = ["objectClass",
             # [attribute names elided in extraction]
             "enabledConnection",
             # [remaining attribute names elided in extraction]
    res = self.samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                            expression="(objectClass=nTDSConnection)")
    # Write NTDS Connection output
    write_search_result(self.samdb, f, res)

    # Query Intersite transports
    attrs = ["objectClass",
             # [attribute names elided in extraction]
             "bridgeheadServerListBL",
             "transportAddressAttribute"]
    sstr = "CN=Inter-Site Transports,CN=Sites,%s" % \
           self.samdb.get_config_basedn()
    res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                            expression="(objectClass=interSiteTransport)")

    # Write inter-site transport output
    write_search_result(self.samdb, f, res)

    # Query siteLink objects.
    attrs = ["objectClass",
             # [remaining attribute names elided in extraction]
    sstr = "CN=Sites,%s" % \
           self.samdb.get_config_basedn()
    res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                            expression="(objectClass=siteLink)",
                            controls=['extended_dn:0'])

    # Write siteLink output
    write_search_result(self.samdb, f, res)

    # Query siteLinkBridge
    attrs = ["objectClass",
             # [remaining attribute names elided in extraction]
    sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
    res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                            expression="(objectClass=siteLinkBridge)")

    # Write siteLinkBridge output
    write_search_result(self.samdb, f, res)

    # Query servers containers
    # Needed for samdb.server_site_name()
    attrs = ["objectClass",
             # [remaining attribute names elided in extraction]
    sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
    res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                            expression="(objectClass=serversContainer)")

    # Write servers container output
    write_search_result(self.samdb, f, res)

    # Query server objects.
    # Needed because some transport interfaces refer back to
    # attributes found in the server object. Also needed
    # so extended-dn will be happy with dsServiceName in rootDSE
    attrs = ["objectClass",
             # [remaining attribute names elided in extraction]
    sstr = "CN=Sites,%s" % self.samdb.get_config_basedn()
    res = self.samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                            expression="(objectClass=server)")

    # Write server output
    write_search_result(self.samdb, f, res)

    # Query Naming Context replicas
    # [enclosing loop over nclist (binding `sstr`) elided in extraction]
    attrs = ["objectClass",
             # [attribute names elided in extraction]
             "msDS-Behavior-Version",
             # [remaining attribute names elided in extraction]
    res = self.samdb.search(sstr, scope=ldb.SCOPE_BASE,
        # [remaining search arguments elided in extraction]

    # Write naming context output
    write_search_result(self.samdb, f, res)

    # Query rootDSE replicas
    attrs = ["objectClass",
             # [attribute names elided in extraction]
             "rootDomainNamingContext",
             "configurationNamingContext",
             "schemaNamingContext",
             "defaultNamingContext",
             # [remaining attribute names elided in extraction]
    res = self.samdb.search(sstr, scope=ldb.SCOPE_BASE,
        # [remaining search arguments elided in extraction]

    # Record the rootDSE object as a dn as it
    # would appear in the base ldb file. We have
    # to save it this way because we are going to
    # be importing as an abbreviated database.
    res[0].dn = ldb.Dn(self.samdb, "@ROOTDSE")

    # Write rootdse output
    write_search_result(self.samdb, f, res)

    # NOTE: Python 2 except syntax; the matching try: was elided.
    except ldb.LdbError, (enum, estr):
        logger.error("Error processing (%s) : %s" % (sstr, estr))
        # [error return elided in extraction]
2598 ##################################################
2600 ##################################################
def sort_replica_by_dsa_guid(rep1, rep2):
    """Three-way compare two NC replicas by their DSA GUID.

    cmp()-style comparator used to put replica lists into a stable,
    GUID-determined order.

    :param rep1: replica object exposing ``rep_dsa_guid``
    :param rep2: replica object exposing ``rep_dsa_guid``
    :return: negative, zero or positive integer
    """
    g1 = rep1.rep_dsa_guid
    g2 = rep2.rep_dsa_guid
    # (g1 > g2) - (g1 < g2) is the canonical replacement for the
    # Python-2-only builtin cmp(); it behaves identically here while
    # also working on Python 3, where cmp() was removed.
    return (g1 > g2) - (g1 < g2)
def sort_dsa_by_gc_and_guid(dsa1, dsa2):
    """Three-way compare two DSAs: GC servers order first, then by GUID.

    cmp()-style comparator: a DSA that is a global catalog server
    sorts before one that is not; ties are broken by ``dsa_guid``.

    NOTE(review): the two return statements inside the is_gc()
    branches were elided in the extraction of this file; the -1/+1
    values below are reconstructed from the stated GC-first ordering
    -- confirm against the canonical source.

    :return: negative, zero or positive integer
    """
    if dsa1.is_gc() and not dsa2.is_gc():
        return -1
    if not dsa1.is_gc() and dsa2.is_gc():
        return +1
    g1 = dsa1.dsa_guid
    g2 = dsa2.dsa_guid
    # Portable cmp() replacement (works on Python 2 and 3).
    return (g1 > g2) - (g1 < g2)
def is_smtp_replication_available():
    """Return whether SMTP-based NC replication is available.

    Currently always returns False because Samba doesn't implement
    SMTP transfer for NC changes.  (The return statement was elided
    in extraction; it is reconstructed directly from this documented
    contract.)
    """
    return False
def write_search_result(samdb, f, res):
    """Write every message of an LDB search result to *f* as LDIF.

    :param samdb: database whose ``write_ldif()`` renders a message
    :param f: open, writable file object
    :param res: iterable of search-result messages

    NOTE(review): the loop header was elided in extraction (``msg``
    had no visible binding); ``for msg in res:`` is the only
    construction consistent with the parameters and body.
    """
    for msg in res:
        # CHANGETYPE_NONE renders a plain record (no changetype: line).
        lstr = samdb.write_ldif(msg, ldb.CHANGETYPE_NONE)
        f.write("%s" % lstr)
def create_edge(con_type, site_link, guid_to_vertex):
    """Build a multi-edge for *site_link* joining its member site vertices.

    NOTE(review): the construction of the edge object ``e`` (and its
    vertex list), together with any trailing attribute assignments
    and the return, were elided in extraction; ``e`` has no visible
    binding in this view.
    """
    # [edge object construction elided in extraction]
    e.site_link = site_link
    # Gather the vertex objects for every site the link spans; sites
    # without a vertex entry are skipped.
    for site_guid in site_link.site_list:
        if str(site_guid) in guid_to_vertex:
            e.vertices.extend(guid_to_vertex.get(str(site_guid)))
    # Replication parameters are copied straight from the siteLink.
    e.repl_info.cost = site_link.cost
    e.repl_info.options = site_link.options
    e.repl_info.interval = site_link.interval
    e.repl_info.schedule = site_link.schedule
    e.con_type = con_type
    # [trailing assignments / return elided in extraction]
def create_auto_edge_set(graph, transport):
    """Build the automatic MultiEdgeSet for one transport type.

    Collects every edge of *graph* whose connection type matches
    *transport* into a single edge set whose GUID is the NULL GUID
    (i.e. not associated with any SiteLinkBridge object).

    :param graph: intersite graph whose ``edges`` are scanned
    :param transport: connection/transport type to select
    :return: the populated MultiEdgeSet
    """
    e_set = MultiEdgeSet()
    # NULL guid, not associated with a SiteLinkBridge object
    e_set.guid = misc.GUID()
    for site_link in graph.edges:
        if site_link.con_type == transport:
            e_set.edges.append(site_link)
    # NOTE(review): this return was elided in extraction; without it
    # the function would be useless to callers, so it is restored.
    return e_set
def create_edge_set(graph, transport, site_link_bridge):
    """Build the MultiEdgeSet corresponding to a siteLinkBridge object.

    NOTE(review): population of the set and the return were elided
    in extraction; only the stub construction is visible.
    """
    # TODO not implemented - need to store all site link bridges
    e_set = MultiEdgeSet()
    # e_set.guid = site_link_bridge
    # [remainder elided in extraction]
def setup_vertices(graph):
    """Reset every vertex of *graph* to its initial shortest-path state.

    NOTE(review): the conditional separating white vertices
    (initialised to MAX_DWORD cost, no root/component) from colored
    ones (cost 0), and the root/component assignments of the colored
    branch, were elided in extraction; the assignments below are the
    surviving lines of both branches.
    """
    for v in graph.vertices:
        # [branch condition elided -- these apply to white vertices]
        v.repl_info.cost = MAX_DWORD
        # [root assignment elided in extraction]
        v.component_id = None
        # [else-branch header elided -- these apply to colored vertices]
        v.repl_info.cost = 0
        # [root/component assignments elided in extraction]
        v.repl_info.interval = 0
        v.repl_info.options = 0xFFFFFFFF
        v.repl_info.schedule = None  # TODO highly suspicious
        # [demoted-flag reset elided in extraction]
def dijkstra(graph, edge_type, include_black):
    """Run Dijkstra's shortest-path search over *graph* for one edge type.

    Repeatedly pops the cheapest vertex from the priority queue and
    relaxes every edge incident to it via try_new_path().

    NOTE(review): the queue initialisation and a guard inside the
    inner loop (presumably skipping relaxation back onto the popped
    vertex itself) were elided in extraction.
    """
    # [queue initialisation elided in extraction]
    setup_dijkstra(graph, edge_type, include_black, queue)
    while len(queue) > 0:
        # Heap entries are (cost, guid, vertex): ordered by cost,
        # with guid as a deterministic tie-breaker.
        cost, guid, vertex = heapq.heappop(queue)
        for edge in vertex.edges:
            for v in edge.vertices:
                # [guard elided in extraction]
                # add new path from vertex to v
                try_new_path(graph, queue, vertex, edge, v)
def setup_dijkstra(graph, edge_type, include_black, queue):
    """Initialise vertex state and seed the Dijkstra priority *queue*.

    NOTE(review): the body under the is_white() test (presumably a
    ``continue``) and the else-branch header before the heappush
    were elided in extraction.
    """
    setup_vertices(graph)
    for vertex in graph.vertices:
        if vertex.is_white():
            # [elided in extraction -- presumably `continue`]

        # Demote vertices that are black when black is excluded, or
        # that cannot accept this edge type at all.
        if ((vertex.is_black() and not include_black)
            or edge_type not in vertex.accept_black
            or edge_type not in vertex.accept_red_red):
            vertex.repl_info.cost = MAX_DWORD
            vertex.root = None  # NULL GUID
            vertex.demoted = True  # Demoted appears not to be used
        # [else-branch header elided in extraction]
        heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
def try_new_path(graph, queue, vfrom, edge, vto):
    """Attempt to relax the path to *vto* via *vfrom* and *edge*.

    NOTE(review): the construction of ``newRI`` (the candidate
    combined ReplInfo) and the bodies of the two rejection branches
    (presumably plain returns) were elided in extraction.
    """
    # [newRI construction elided in extraction]
    # What this function checks is that there is a valid time frame for
    # which replication can actually occur, despite being adequately
    # connected.
    intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI)

    # If the new path costs more than the current, then ignore the edge
    if newRI.cost > vto.repl_info.cost:
        # [elided in extraction -- presumably `return`]

    # Cheaper but with no overlapping replication window: reject too.
    if newRI.cost < vto.repl_info.cost and not intersect:
        # [elided in extraction -- presumably `return`]

    new_duration = total_schedule(newRI.schedule)
    old_duration = total_schedule(vto.repl_info.schedule)

    # Cheaper or longer schedule
    if newRI.cost < vto.repl_info.cost or new_duration > old_duration:
        vto.root = vfrom.root
        vto.component_id = vfrom.component_id
        vto.repl_info = newRI
        heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
def check_demote_vertex(vertex, edge_type):
    """Demote *vertex* if it cannot accept edges of *edge_type*.

    NOTE(review): the body of the white-vertex guard (presumably a
    return) and the root reset inside the demotion branch were
    elided in extraction.
    """
    if vertex.is_white():
        # [elided in extraction -- presumably `return`]

    # Accepts neither red-red nor black edges, demote
    if edge_type not in vertex.accept_black and edge_type not in vertex.accept_red_red:
        vertex.repl_info.cost = MAX_DWORD
        # [root reset elided in extraction]
        vertex.demoted = True  # Demoted appears not to be used
def undemote_vertex(vertex):
    """Restore a previously demoted colored vertex to root state.

    NOTE(review): the body of the white-vertex guard (presumably a
    return) was elided in extraction.
    """
    if vertex.is_white():
        # [elided in extraction -- presumably `return`]

    vertex.repl_info.cost = 0
    vertex.root = vertex
    vertex.demoted = False
def process_edge_set(graph, e_set, internal_edges):
    """Process one edge set, or (apparently) every edge of the graph.

    NOTE(review): a conditional separating the two loops below (the
    surviving shape suggests an ``e_set is None`` test with the
    second loop as its else branch) was elided in extraction.
    """
    # [branch header elided in extraction]
    for edge in graph.edges:
        # Temporarily demote endpoints that cannot accept this
        # connection type so the edge is judged against eligible
        # vertices only, then restore them afterwards.
        for vertex in edge.vertices:
            check_demote_vertex(vertex, edge.con_type)
        process_edge(graph, edge, internal_edges)
        for vertex in edge.vertices:
            undemote_vertex(vertex)
    # [else-branch header elided in extraction]
    for edge in e_set.edges:
        process_edge(graph, edge, internal_edges)
def process_edge(graph, examine, internal_edges):
    """Select the best endpoint of *examine* and propose inter-tree edges.

    NOTE(review): the initialisation of the ``vertices`` list, the
    sort call after it is filled, and the body of the first guard
    (presumably ``continue``) were elided in extraction.
    """
    # Find the set of all vertices touches the edge to examine
    # [`vertices` list initialisation elided in extraction]
    for v in examine.vertices:
        # Append a 4-tuple of color, repl cost, guid and vertex
        vertices.append((v.color, v.repl_info.cost, v.guid, v))
    # Sort by color, lower
    # [sort call elided in extraction]
    color, cost, guid, bestv = vertices[0]
    # Add to internal edges an edge from every colored vertex to bestV
    for v in examine.vertices:
        if v.component_id is None or v.root is None:
            # [elided in extraction -- presumably `continue`]

        # Only add edge if valid inter-tree edge - needs a root and
        # different components
        if (bestv.component_id is not None and bestv.root is not None
            and v.component_id is not None and v.root is not None and
            bestv.component_id != v.component_id):
            add_int_edge(graph, internal_edges, examine, bestv, v)
# Add internal edge, endpoints are roots of the vertices to pass in and are always colored
def add_int_edge(graph, internal_edges, examine, v1, v2):
    """Propose an InternalEdge between the roots of *v1* and *v2*.

    NOTE(review): the ``root1``/``root2``/``red_red`` bindings, the
    branch structure around the acceptance checks, the ``ri``/``ri2``
    ReplInfo constructions and the rejection returns were all elided
    in extraction.
    """
    # [root1/root2 bindings and red_red initialisation elided]
    if root1.is_red() and root2.is_red():
        # [red_red assignment elided in extraction]

    # [branch header elided -- red-red pairs check accept_red_red]
    if (examine.con_type not in root1.accept_red_red
        or examine.con_type not in root2.accept_red_red):
        # [elided in extraction -- presumably `return`]

    # [branch header elided -- other pairs check accept_black]
    if (examine.con_type not in root1.accept_black
        or examine.con_type not in root2.accept_black):
        # [elided in extraction -- presumably `return`]

    # Create the transitive replInfo for the two trees and this edge
    if not combine_repl_info(v1.repl_info, v2.repl_info, ri):
        # [elided in extraction -- presumably `return`]

    # ri is now initialized
    if not combine_repl_info(ri, examine.repl_info, ri2):
        # [elided in extraction -- presumably `return`]

    newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type)
    # Order by vertex guid
    if newIntEdge.v1.guid > newIntEdge.v2.guid:
        newIntEdge.v1 = root2
        newIntEdge.v2 = root1

    internal_edges.add(newIntEdge)
def kruskal(graph, edges):
    """Kruskal-style spanning-tree selection over the internal *edges*.

    Components are merged via their representative (component_id);
    returns a tuple of (output edges, number of remaining components).

    NOTE(review): the per-vertex reset, the sort of *edges*, the
    index/output-list initialisations, the fetch of the current edge
    and the index increment were elided in extraction.
    """
    for v in graph.vertices:
        # [per-vertex reset elided in extraction]

    # Only the non-white (colored) vertices participate as components.
    components = set([x for x in graph.vertices if not x.is_white()])

    # Sorted based on internal comparison function of internal edge
    # [sort of `edges` elided in extraction]

    expected_num_tree_edges = 0  # TODO this value makes little sense

    # [index / output-edge initialisations elided in extraction]
    while index < len(edges):  # TODO and num_components > 1
        # [current-edge fetch elided in extraction]
        parent1 = find_component(e.v1)
        parent2 = find_component(e.v2)
        if parent1 is not parent2:
            # Distinct trees: keep the edge and merge the components.
            add_out_edge(graph, output_edges, e)
            parent1.component_id = parent2
            components.discard(parent1)
        # [index increment elided in extraction]

    return output_edges, len(components)
def find_component(vertex):
    """Find the representative of *vertex*'s component (union-find).

    The second loop rewrites component_id pointers along the chain --
    classic path compression.

    NOTE(review): the early return, the ``current``/``root``
    bindings, the advance to ``n`` in the compression loop and the
    final return were elided in extraction.
    """
    # Already its own representative.
    if vertex.component_id is vertex:
        # [elided in extraction -- presumably `return vertex`]

    # Walk up the chain to find the representative...
    # [`current` binding elided in extraction]
    while current.component_id is not current:
        current = current.component_id

    # ...then re-walk it, pointing every node directly at the root.
    # [`root` binding / chain restart elided in extraction]
    while current.component_id is not root:
        n = current.component_id
        current.component_id = root
        # [advance to `n` and final return elided in extraction]
def add_out_edge(graph, output_edges, e):
    """Convert an accepted internal edge *e* into an output multi-edge.

    NOTE(review): the ``v1``/``v2`` bindings, the construction of
    ``ee`` and any trailing registration of the edge on its endpoint
    vertices were elided in extraction.
    """
    # [v1/v2 bindings elided in extraction]

    # This multi-edge is a 'real' edge with no GUID
    # [edge-object construction elided in extraction]
    ee.vertices.append(v1)
    ee.vertices.append(v2)
    ee.con_type = e.e_type
    ee.repl_info = e.repl_info
    output_edges.append(ee)
    # [registration of `ee` on endpoint vertices elided in extraction]
##################################################
# samba_kcc entry point
##################################################
# NOTE(review): several statements of this top-level script were
# elided in extraction -- among them the KCC() instantiation, the
# conditionals selecting log level / random seed / import-export
# mode, and the sys.exit() calls.  Elisions are marked inline.

parser = optparse.OptionParser("samba_kcc [options]")
sambaopts = options.SambaOptions(parser)
credopts = options.CredentialsOptions(parser)

parser.add_option_group(sambaopts)
parser.add_option_group(credopts)
parser.add_option_group(options.VersionOptions(parser))

parser.add_option("--readonly",
                  help="compute topology but do not update database",
                  action="store_true")

parser.add_option("--debug",
                  help="debug output",
                  action="store_true")

parser.add_option("--seed",
                  help="random number seed",
                  type=str, metavar="<number>")

parser.add_option("--importldif",
                  help="import topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("--exportldif",
                  help="export topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("-H", "--URL",
                  help="LDB URL for database or target server",
                  type=str, metavar="<URL>", dest="dburl")

parser.add_option("--tmpdb",
                  help="schemaless database file to create for ldif import",
                  type=str, metavar="<file>")

# Log to stdout so messages land in the samba logs (see header note).
logger = logging.getLogger("samba_kcc")
logger.addHandler(logging.StreamHandler(sys.stdout))

lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)

opts, args = parser.parse_args()

if opts.readonly is None:
    opts.readonly = False

# [log-level selection conditionals elided in extraction; the three
#  setLevel calls below were originally alternative branches]
logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
logger.setLevel(logging.WARNING)

# initialize seed from optional input parameter
# [seed-presence conditional elided in extraction; the two seed
#  calls below were originally alternative branches]
random.seed(int(opts.seed))
random.seed(0xACE5CA11)

if opts.dburl is None:
    opts.dburl = lp.samdb_url()

# Instantiate Knowledge Consistency Checker and perform run
# [KCC construction and export-mode conditional elided in extraction]
rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)

# [import-mode conditional elided in extraction]
# Refuse LDAP URLs for the temp db: it must be a local scratch file.
if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
    logger.error("Specify a target temp database file with --tmpdb option.")
    # [exit elided in extraction]

rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)

# [normal-run branch structure and final exit elided in extraction]
rc = kcc.run(opts.dburl, lp, creds)