3 # Compute our KCC topology
5 # Copyright (C) Dave Craft 2011
7 # This program is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 3 of the License, or
10 # (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
17 # You should have received a copy of the GNU General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
# Emit output unbuffered so messages reach the samba logs immediately
# instead of being swallowed by a timeout.
os.environ['PYTHONUNBUFFERED'] = '1'

# forcing GMT avoids a problem in some timezones with kerberos. Both MIT
# heimdal can get mutual authentication errors due to the 24 second difference
# between UTC and GMT when using some zone files (eg. the PDT zone from
# the US)
os.environ["TZ"] = "GMT"

# Find right directory when running from source tree
sys.path.insert(0, "bin/python")
48 from samba
.auth
import system_session
49 from samba
.samdb
import SamDB
50 from samba
.dcerpc
import drsuapi
51 from samba
.kcc_utils
import *
56 """The Knowledge Consistency Checker class.
58 A container for objects and methods allowing a run of the KCC. Produces a
59 set of connections in the samdb for which the Distributed Replication
60 Service can then utilize to replicate naming contexts
63 """Initializes the partitions class which can hold
64 our local DCs partitions or all the partitions in
67 self
.part_table
= {} # partition objects
69 self
.transport_table
= {}
70 self
.sitelink_table
= {}
72 # TODO: These should be backed by a 'permanent' store so that when
73 # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
74 # the failure information can be returned
75 self
.kcc_failed_links
= {}
76 self
.kcc_failed_connections
= set()
78 # Used in inter-site topology computation. A list
79 # of connections (by NTDSConnection object) that are
80 # to be kept when pruning un-needed NTDS Connections
81 self
.keep_connection_list
= []
83 self
.my_dsa_dnstr
= None # My dsa DN
84 self
.my_dsa
= None # My dsa object
86 self
.my_site_dnstr
= None
91 def load_all_transports(self
):
92 """Loads the inter-site transport objects for Sites
94 ::returns: Raises an Exception on error
97 res
= self
.samdb
.search("CN=Inter-Site Transports,CN=Sites,%s" %
98 self
.samdb
.get_config_basedn(),
99 scope
=ldb
.SCOPE_SUBTREE
,
100 expression
="(objectClass=interSiteTransport)")
101 except ldb
.LdbError
, (enum
, estr
):
102 raise Exception("Unable to find inter-site transports - (%s)" %
108 transport
= Transport(dnstr
)
110 transport
.load_transport(self
.samdb
)
113 if str(transport
.guid
) in self
.transport_table
.keys():
116 # Assign this transport to table
118 self
.transport_table
[str(transport
.guid
)] = transport
120 def load_all_sitelinks(self
):
121 """Loads the inter-site siteLink objects
123 ::returns: Raises an Exception on error
126 res
= self
.samdb
.search("CN=Inter-Site Transports,CN=Sites,%s" %
127 self
.samdb
.get_config_basedn(),
128 scope
=ldb
.SCOPE_SUBTREE
,
129 expression
="(objectClass=siteLink)")
130 except ldb
.LdbError
, (enum
, estr
):
131 raise Exception("Unable to find inter-site siteLinks - (%s)" % estr
)
137 if dnstr
in self
.sitelink_table
.keys():
140 sitelink
= SiteLink(dnstr
)
142 sitelink
.load_sitelink(self
.samdb
)
144 # Assign this siteLink to table
146 self
.sitelink_table
[dnstr
] = sitelink
148 def load_my_site(self
):
149 """Loads the Site class for the local DSA
151 ::returns: Raises an Exception on error
153 self
.my_site_dnstr
= "CN=%s,CN=Sites,%s" % (
154 self
.samdb
.server_site_name(),
155 self
.samdb
.get_config_basedn())
156 site
= Site(self
.my_site_dnstr
)
157 site
.load_site(self
.samdb
)
159 self
.site_table
[self
.my_site_dnstr
] = site
162 def load_all_sites(self
):
163 """Discover all sites and instantiate and load each
166 ::returns: Raises an Exception on error
169 res
= self
.samdb
.search("CN=Sites,%s" %
170 self
.samdb
.get_config_basedn(),
171 scope
=ldb
.SCOPE_SUBTREE
,
172 expression
="(objectClass=site)")
173 except ldb
.LdbError
, (enum
, estr
):
174 raise Exception("Unable to find sites - (%s)" % estr
)
177 sitestr
= str(msg
.dn
)
180 if sitestr
in self
.site_table
.keys():
184 site
.load_site(self
.samdb
)
186 self
.site_table
[sitestr
] = site
188 def load_my_dsa(self
):
189 """Discover my nTDSDSA dn thru the rootDSE entry
191 ::returns: Raises an Exception on error.
193 dn
= ldb
.Dn(self
.samdb
, "")
195 res
= self
.samdb
.search(base
=dn
, scope
=ldb
.SCOPE_BASE
,
196 attrs
=["dsServiceName"])
197 except ldb
.LdbError
, (enum
, estr
):
198 raise Exception("Unable to find my nTDSDSA - (%s)" % estr
)
200 self
.my_dsa_dnstr
= res
[0]["dsServiceName"][0]
201 self
.my_dsa
= self
.my_site
.get_dsa(self
.my_dsa_dnstr
)
203 def load_all_partitions(self
):
204 """Discover all NCs thru the Partitions dn and
205 instantiate and load the NCs.
207 Each NC is inserted into the part_table by partition
208 dn string (not the nCName dn string)
210 ::returns: Raises an Exception on error
213 res
= self
.samdb
.search("CN=Partitions,%s" %
214 self
.samdb
.get_config_basedn(),
215 scope
=ldb
.SCOPE_SUBTREE
,
216 expression
="(objectClass=crossRef)")
217 except ldb
.LdbError
, (enum
, estr
):
218 raise Exception("Unable to find partitions - (%s)" % estr
)
221 partstr
= str(msg
.dn
)
224 if partstr
in self
.part_table
.keys():
227 part
= Partition(partstr
)
229 part
.load_partition(self
.samdb
)
230 self
.part_table
[partstr
] = part
232 def should_be_present_test(self
):
233 """Enumerate all loaded partitions and DSAs in local
234 site and test if NC should be present as replica
236 for partdn
, part
in self
.part_table
.items():
237 for dsadn
, dsa
in self
.my_site
.dsa_table
.items():
238 needed
, ro
, partial
= part
.should_be_present(dsa
)
239 logger
.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
240 (dsadn
, part
.nc_dnstr
, needed
, ro
, partial
))
242 def refresh_failed_links_connections(self
):
243 """Instead of NULL link with failure_count = 0, the tuple is simply removed"""
245 # LINKS: Refresh failed links
246 self
.kcc_failed_links
= {}
247 current
, needed
= self
.my_dsa
.get_rep_tables()
248 for replica
in current
.values():
249 # For every possible connection to replicate
250 for reps_from
in replica
.rep_repsFrom
:
251 failure_count
= reps_from
.consecutive_sync_failures
252 if failure_count
<= 0:
255 dsa_guid
= str(reps_from
.source_dsa_obj_guid
)
256 time_first_failure
= reps_from
.last_success
257 last_result
= reps_from
.last_attempt
258 dns_name
= reps_from
.dns_name1
260 f
= self
.kcc_failed_links
.get(dsa_guid
)
262 f
= KCCFailedObject(dsa_guid
, failure_count
,
263 time_first_failure
, last_result
,
265 self
.kcc_failed_links
[dsa_guid
] = f
266 #elif f.failure_count == 0:
267 # f.failure_count = failure_count
268 # f.time_first_failure = time_first_failure
269 # f.last_result = last_result
271 f
.failure_count
= max(f
.failure_count
, failure_count
)
272 f
.time_first_failure
= min(f
.time_first_failure
, time_first_failure
)
273 f
.last_result
= last_result
275 # CONNECTIONS: Refresh failed connections
276 restore_connections
= set()
277 for connection
in self
.kcc_failed_connections
:
279 drs_utils
.drsuapi_connect(connection
.dns_name
, lp
, creds
)
280 # Failed connection is no longer failing
281 restore_connections
.add(connection
)
282 except drs_utils
.drsException
:
283 # Failed connection still failing
284 connection
.failure_count
+= 1
286 # Remove the restored connections from the failed connections
287 self
.kcc_failed_connections
.difference_update(restore_connections
)
289 def is_stale_link_connection(self
, target_dsa
):
290 """Returns False if no tuple z exists in the kCCFailedLinks or
291 kCCFailedConnections variables such that z.UUIDDsa is the
292 objectGUID of the target dsa, z.FailureCount > 0, and
293 the current time - z.TimeFirstFailure > 2 hours.
295 # Returns True if tuple z exists...
296 failed_link
= self
.kcc_failed_links
.get(str(target_dsa
.dsa_guid
))
298 # failure_count should be > 0, but check anyways
299 if failed_link
.failure_count
> 0:
300 unix_first_time_failure
= nttime2unix(failed_link
.time_first_failure
)
301 # TODO guard against future
302 current_time
= int(time
.time())
303 if unix_first_time_failure
> current_time
:
304 logger
.error("The last success time attribute for \
305 repsFrom is in the future!")
307 # Perform calculation in seconds
308 if (current_time
- unix_first_time_failure
) > 60 * 60 * 2:
315 # TODO: This should be backed by some form of local database
316 def remove_unneeded_failed_links_connections(self
):
317 # Remove all tuples in kcc_failed_links where failure count = 0
318 # In this implementation, this should never happen.
320 # Remove all connections which were not used this run or connections
321 # that became active during this run.
324 def remove_unneeded_ntdsconn(self
, all_connected
):
325 """Removes unneeded NTDS Connections after computation
326 of KCC intra and inter-site topology has finished.
330 # Loop thru connections
331 for cn_dnstr
, cn_conn
in mydsa
.connect_table
.items():
333 s_dnstr
= cn_conn
.get_from_dnstr()
335 cn_conn
.to_be_deleted
= True
338 # Get the source DSA no matter what site
339 s_dsa
= self
.get_dsa(s_dnstr
)
341 # Check if the DSA is in our site
342 if self
.my_site
.same_site(s_dsa
):
347 # Given an nTDSConnection object cn, if the DC with the
348 # nTDSDSA object dc that is the parent object of cn and
349 # the DC with the nTDSDA object referenced by cn!fromServer
350 # are in the same site, the KCC on dc deletes cn if all of
351 # the following are true:
353 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
355 # No site settings object s exists for the local DC's site, or
356 # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
359 # Another nTDSConnection object cn2 exists such that cn and
360 # cn2 have the same parent object, cn!fromServer = cn2!fromServer,
363 # cn!whenCreated < cn2!whenCreated
365 # cn!whenCreated = cn2!whenCreated and
366 # cn!objectGUID < cn2!objectGUID
368 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
370 if not cn_conn
.is_generated():
373 if self
.my_site
.is_cleanup_ntdsconn_disabled():
376 # Loop thru connections looking for a duplicate that
377 # fulfills the previous criteria
380 for cn2_dnstr
, cn2_conn
in mydsa
.connect_table
.items():
381 if cn2_conn
is cn_conn
:
384 s2_dnstr
= cn2_conn
.get_from_dnstr()
388 # If the NTDS Connections has a different
389 # fromServer field then no match
390 if s2_dnstr
!= s_dnstr
:
393 lesser
= (cn_conn
.whenCreated
< cn2_conn
.whenCreated
or
394 (cn_conn
.whenCreated
== cn2_conn
.whenCreated
and
395 cmp(cn_conn
.guid
, cn2_conn
.guid
) < 0))
400 if lesser
and not cn_conn
.is_rodc_topology():
401 cn_conn
.to_be_deleted
= True
403 # Given an nTDSConnection object cn, if the DC with the nTDSDSA
404 # object dc that is the parent object of cn and the DC with
405 # the nTDSDSA object referenced by cn!fromServer are in
406 # different sites, a KCC acting as an ISTG in dc's site
407 # deletes cn if all of the following are true:
409 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
411 # cn!fromServer references an nTDSDSA object for a DC
412 # in a site other than the local DC's site.
414 # The keepConnections sequence returned by
415 # CreateIntersiteConnections() does not contain
416 # cn!objectGUID, or cn is "superseded by" (see below)
417 # another nTDSConnection cn2 and keepConnections
418 # contains cn2!objectGUID.
420 # The return value of CreateIntersiteConnections()
423 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
426 else: # different site
428 if not mydsa
.is_istg():
431 if not cn_conn
.is_generated():
435 # We are directly using this connection in intersite or
436 # we are using a connection which can supersede this one.
438 # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
439 # appear to be correct.
441 # 1. cn!fromServer and cn!parent appear inconsistent with no cn2
442 # 2. The repsFrom do not imply each other
444 if self
.keep_connection(cn_conn
): # and not_superceded:
447 # This is the result of create_intersite_connections
448 if not all_connected
:
451 if not cn_conn
.is_rodc_topology():
452 cn_conn
.to_be_deleted
= True
455 if mydsa
.is_ro() or opts
.readonly
:
456 for dnstr
, connect
in mydsa
.connect_table
.items():
457 if connect
.to_be_deleted
:
458 logger
.info("TO BE DELETED:\n%s" % connect
)
459 if connect
.to_be_added
:
460 logger
.info("TO BE ADDED:\n%s" % connect
)
462 # Peform deletion from our tables but perform
463 # no database modification
464 mydsa
.commit_connections(self
.samdb
, ro
=True)
466 # Commit any modified connections
467 mydsa
.commit_connections(self
.samdb
)
469 def get_dsa_by_guidstr(self
, guidstr
):
470 """Given a DSA guid string, consule all sites looking
471 for the corresponding DSA and return it.
473 for site
in self
.site_table
.values():
474 dsa
= site
.get_dsa_by_guidstr(guidstr
)
479 def get_dsa(self
, dnstr
):
480 """Given a DSA dn string, consule all sites looking
481 for the corresponding DSA and return it.
483 for site
in self
.site_table
.values():
484 dsa
= site
.get_dsa(dnstr
)
489 def modify_repsFrom(self
, n_rep
, t_repsFrom
, s_rep
, s_dsa
, cn_conn
):
490 """Update t_repsFrom if necessary to satisfy requirements. Such
491 updates are typically required when the IDL_DRSGetNCChanges
492 server has moved from one site to another--for example, to
493 enable compression when the server is moved from the
494 client's site to another site.
496 :param n_rep: NC replica we need
497 :param t_repsFrom: repsFrom tuple to modify
498 :param s_rep: NC replica at source DSA
499 :param s_dsa: source DSA
500 :param cn_conn: Local DSA NTDSConnection child
502 ::returns: (update) bit field containing which portion of the
503 repsFrom was modified. This bit field is suitable as input
504 to IDL_DRSReplicaModify ulModifyFields element, as it consists
506 drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
507 drsuapi.DRSUAPI_DRS_UPDATE_FLAGS
508 drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS
510 s_dnstr
= s_dsa
.dsa_dnstr
513 if self
.my_site
.same_site(s_dsa
):
518 times
= cn_conn
.convert_schedule_to_repltimes()
520 # if schedule doesn't match then update and modify
521 if times
!= t_repsFrom
.schedule
:
522 t_repsFrom
.schedule
= times
524 # Bit DRS_PER_SYNC is set in replicaFlags if and only
525 # if nTDSConnection schedule has a value v that specifies
526 # scheduled replication is to be performed at least once
528 if cn_conn
.is_schedule_minimum_once_per_week():
530 if (t_repsFrom
.replica_flags
&
531 drsuapi
.DRSUAPI_DRS_PER_SYNC
) == 0x0:
532 t_repsFrom
.replica_flags |
= drsuapi
.DRSUAPI_DRS_PER_SYNC
534 # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
535 # if the source DSA and the local DC's nTDSDSA object are
536 # in the same site or source dsa is the FSMO role owner
537 # of one or more FSMO roles in the NC replica.
538 if same_site
or n_rep
.is_fsmo_role_owner(s_dnstr
):
540 if (t_repsFrom
.replica_flags
&
541 drsuapi
.DRSUAPI_DRS_INIT_SYNC
) == 0x0:
542 t_repsFrom
.replica_flags |
= drsuapi
.DRSUAPI_DRS_INIT_SYNC
544 # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
545 # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
546 # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
547 # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
548 # t.replicaFlags if and only if s and the local DC's
549 # nTDSDSA object are in different sites.
550 if (cn_conn
.options
& dsdb
.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT
) != 0x0:
552 if (cn_conn
.options
& dsdb
.NTDSCONN_OPT_USE_NOTIFY
) == 0x0:
554 if (t_repsFrom
.replica_flags
&
555 drsuapi
.DRSUAPI_DRS_NEVER_NOTIFY
) == 0x0:
556 t_repsFrom
.replica_flags |
= drsuapi
.DRSUAPI_DRS_NEVER_NOTIFY
560 if (t_repsFrom
.replica_flags
&
561 drsuapi
.DRSUAPI_DRS_NEVER_NOTIFY
) == 0x0:
562 t_repsFrom
.replica_flags |
= drsuapi
.DRSUAPI_DRS_NEVER_NOTIFY
564 # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
565 # and only if s and the local DC's nTDSDSA object are
566 # not in the same site and the
567 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
568 # clear in cn!options
569 if (not same_site
and
571 dsdb
.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
) == 0x0):
573 if (t_repsFrom
.replica_flags
&
574 drsuapi
.DRSUAPI_DRS_USE_COMPRESSION
) == 0x0:
575 t_repsFrom
.replica_flags |
= drsuapi
.DRSUAPI_DRS_USE_COMPRESSION
577 # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
578 # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
579 if (cn_conn
.options
& dsdb
.NTDSCONN_OPT_TWOWAY_SYNC
) != 0x0:
581 if (t_repsFrom
.replica_flags
&
582 drsuapi
.DRSUAPI_DRS_TWOWAY_SYNC
) == 0x0:
583 t_repsFrom
.replica_flags |
= drsuapi
.DRSUAPI_DRS_TWOWAY_SYNC
585 # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
586 # set in t.replicaFlags if and only if cn!enabledConnection = false.
587 if not cn_conn
.is_enabled():
589 if (t_repsFrom
.replica_flags
&
590 drsuapi
.DRSUAPI_DRS_DISABLE_AUTO_SYNC
) == 0x0:
591 t_repsFrom
.replica_flags |
= \
592 drsuapi
.DRSUAPI_DRS_DISABLE_AUTO_SYNC
594 if (t_repsFrom
.replica_flags
&
595 drsuapi
.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC
) == 0x0:
596 t_repsFrom
.replica_flags |
= \
597 drsuapi
.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC
599 # If s and the local DC's nTDSDSA object are in the same site,
600 # cn!transportType has no value, or the RDN of cn!transportType
603 # Bit DRS_MAIL_REP in t.replicaFlags is clear.
605 # t.uuidTransport = NULL GUID.
607 # t.uuidDsa = The GUID-based DNS name of s.
611 # Bit DRS_MAIL_REP in t.replicaFlags is set.
613 # If x is the object with dsname cn!transportType,
614 # t.uuidTransport = x!objectGUID.
616 # Let a be the attribute identified by
617 # x!transportAddressAttribute. If a is
618 # the dNSHostName attribute, t.uuidDsa = the GUID-based
619 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
621 # It appears that the first statement i.e.
623 # "If s and the local DC's nTDSDSA object are in the same
624 # site, cn!transportType has no value, or the RDN of
625 # cn!transportType is CN=IP:"
627 # could be a slightly tighter statement if it had an "or"
628 # between each condition. I believe this should
631 # IF (same-site) OR (no-value) OR (type-ip)
633 # because IP should be the primary transport mechanism
634 # (even in inter-site) and the absense of the transportType
635 # attribute should always imply IP no matter if its multi-site
637 # NOTE MS-TECH INCORRECT:
639 # All indications point to these statements above being
640 # incorrectly stated:
642 # t.uuidDsa = The GUID-based DNS name of s.
644 # Let a be the attribute identified by
645 # x!transportAddressAttribute. If a is
646 # the dNSHostName attribute, t.uuidDsa = the GUID-based
647 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
649 # because the uuidDSA is a GUID and not a GUID-base DNS
650 # name. Nor can uuidDsa hold (s!parent)!a if not
651 # dNSHostName. What should have been said is:
653 # t.naDsa = The GUID-based DNS name of s
655 # That would also be correct if transportAddressAttribute
656 # were "mailAddress" because (naDsa) can also correctly
657 # hold the SMTP ISM service address.
659 nastr
= "%s._msdcs.%s" % (s_dsa
.dsa_guid
, self
.samdb
.forest_dns_name())
661 # We're not currently supporting SMTP replication
662 # so is_smtp_replication_available() is currently
663 # always returning False
665 cn_conn
.transport_dnstr
is None or
666 cn_conn
.transport_dnstr
.find("CN=IP") == 0 or
667 not is_smtp_replication_available()):
669 if (t_repsFrom
.replica_flags
&
670 drsuapi
.DRSUAPI_DRS_MAIL_REP
) != 0x0:
671 t_repsFrom
.replica_flags
&= ~drsuapi
.DRSUAPI_DRS_MAIL_REP
673 null_guid
= misc
.GUID()
674 if (t_repsFrom
.transport_guid
is None or
675 t_repsFrom
.transport_guid
!= null_guid
):
676 t_repsFrom
.transport_guid
= null_guid
678 # See (NOTE MS-TECH INCORRECT) above
679 if t_repsFrom
.version
== 0x1:
680 if t_repsFrom
.dns_name1
is None or \
681 t_repsFrom
.dns_name1
!= nastr
:
682 t_repsFrom
.dns_name1
= nastr
684 if t_repsFrom
.dns_name1
is None or \
685 t_repsFrom
.dns_name2
is None or \
686 t_repsFrom
.dns_name1
!= nastr
or \
687 t_repsFrom
.dns_name2
!= nastr
:
688 t_repsFrom
.dns_name1
= nastr
689 t_repsFrom
.dns_name2
= nastr
692 if (t_repsFrom
.replica_flags
&
693 drsuapi
.DRSUAPI_DRS_MAIL_REP
) == 0x0:
694 t_repsFrom
.replica_flags |
= drsuapi
.DRSUAPI_DRS_MAIL_REP
696 # We have a transport type but its not an
697 # object in the database
698 if cn_conn
.transport_guid
not in self
.transport_table
.keys():
699 raise Exception("Missing inter-site transport - (%s)" %
700 cn_conn
.transport_dnstr
)
702 x_transport
= self
.transport_table
[str(cn_conn
.transport_guid
)]
704 if t_repsFrom
.transport_guid
!= x_transport
.guid
:
705 t_repsFrom
.transport_guid
= x_transport
.guid
707 # See (NOTE MS-TECH INCORRECT) above
708 if x_transport
.address_attr
== "dNSHostName":
710 if t_repsFrom
.version
== 0x1:
711 if t_repsFrom
.dns_name1
is None or \
712 t_repsFrom
.dns_name1
!= nastr
:
713 t_repsFrom
.dns_name1
= nastr
715 if t_repsFrom
.dns_name1
is None or \
716 t_repsFrom
.dns_name2
is None or \
717 t_repsFrom
.dns_name1
!= nastr
or \
718 t_repsFrom
.dns_name2
!= nastr
:
719 t_repsFrom
.dns_name1
= nastr
720 t_repsFrom
.dns_name2
= nastr
723 # MS tech specification says we retrieve the named
724 # attribute in "transportAddressAttribute" from the parent of
727 pdnstr
= s_dsa
.get_parent_dnstr()
728 attrs
= [ x_transport
.address_attr
]
730 res
= self
.samdb
.search(base
=pdnstr
, scope
=ldb
.SCOPE_BASE
,
732 except ldb
.LdbError
, (enum
, estr
):
734 "Unable to find attr (%s) for (%s) - (%s)" %
735 (x_transport
.address_attr
, pdnstr
, estr
))
738 nastr
= str(msg
[x_transport
.address_attr
][0])
740 # See (NOTE MS-TECH INCORRECT) above
741 if t_repsFrom
.version
== 0x1:
742 if t_repsFrom
.dns_name1
is None or \
743 t_repsFrom
.dns_name1
!= nastr
:
744 t_repsFrom
.dns_name1
= nastr
746 if t_repsFrom
.dns_name1
is None or \
747 t_repsFrom
.dns_name2
is None or \
748 t_repsFrom
.dns_name1
!= nastr
or \
749 t_repsFrom
.dns_name2
!= nastr
:
751 t_repsFrom
.dns_name1
= nastr
752 t_repsFrom
.dns_name2
= nastr
754 if t_repsFrom
.is_modified():
755 logger
.debug("modify_repsFrom(): %s" % t_repsFrom
)
757 def is_repsFrom_implied(self
, n_rep
, cn_conn
):
758 """Given a NC replica and NTDS Connection, determine if the connection
759 implies a repsFrom tuple should be present from the source DSA listed
760 in the connection to the naming context
762 :param n_rep: NC replica
763 :param conn: NTDS Connection
764 ::returns (True || False), source DSA:
766 # NTDS Connection must satisfy all the following criteria
767 # to imply a repsFrom tuple is needed:
769 # cn!enabledConnection = true.
770 # cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
771 # cn!fromServer references an nTDSDSA object.
774 if cn_conn
.is_enabled() and not cn_conn
.is_rodc_topology():
776 s_dnstr
= cn_conn
.get_from_dnstr()
777 if s_dnstr
is not None:
778 s_dsa
= self
.get_dsa(s_dnstr
)
780 # No DSA matching this source DN string?
784 # To imply a repsFrom tuple is needed, each of these
787 # An NC replica of the NC "is present" on the DC to
788 # which the nTDSDSA object referenced by cn!fromServer
791 # An NC replica of the NC "should be present" on
793 s_rep
= s_dsa
.get_current_replica(n_rep
.nc_dnstr
)
795 if s_rep
is None or not s_rep
.is_present():
798 # To imply a repsFrom tuple is needed, each of these
801 # The NC replica on the DC referenced by cn!fromServer is
802 # a writable replica or the NC replica that "should be
803 # present" on the local DC is a partial replica.
805 # The NC is not a domain NC, the NC replica that
806 # "should be present" on the local DC is a partial
807 # replica, cn!transportType has no value, or
808 # cn!transportType has an RDN of CN=IP.
810 implied
= (not s_rep
.is_ro() or n_rep
.is_partial()) and \
811 (not n_rep
.is_domain() or
812 n_rep
.is_partial() or
813 cn_conn
.transport_dnstr
is None or
814 cn_conn
.transport_dnstr
.find("CN=IP") == 0)
821 def translate_ntdsconn(self
):
822 """This function adjusts values of repsFrom abstract attributes of NC
823 replicas on the local DC to match those implied by
824 nTDSConnection objects.
826 logger
.debug("translate_ntdsconn(): enter")
828 if self
.my_dsa
.is_translate_ntdsconn_disabled():
831 current_rep_table
, needed_rep_table
= self
.my_dsa
.get_rep_tables()
833 # Filled in with replicas we currently have that need deleting
834 delete_rep_table
= {}
836 # We're using the MS notation names here to allow
837 # correlation back to the published algorithm.
839 # n_rep - NC replica (n)
840 # t_repsFrom - tuple (t) in n!repsFrom
841 # s_dsa - Source DSA of the replica. Defined as nTDSDSA
842 # object (s) such that (s!objectGUID = t.uuidDsa)
843 # In our IDL representation of repsFrom the (uuidDsa)
844 # attribute is called (source_dsa_obj_guid)
845 # cn_conn - (cn) is nTDSConnection object and child of the local DC's
846 # nTDSDSA object and (cn!fromServer = s)
847 # s_rep - source DSA replica of n
849 # If we have the replica and its not needed
850 # then we add it to the "to be deleted" list.
851 for dnstr
, n_rep
in current_rep_table
.items():
852 if dnstr
not in needed_rep_table
.keys():
853 delete_rep_table
[dnstr
] = n_rep
855 # Now perform the scan of replicas we'll need
856 # and compare any current repsFrom against the
858 for dnstr
, n_rep
in needed_rep_table
.items():
860 # load any repsFrom and fsmo roles as we'll
861 # need them during connection translation
862 n_rep
.load_repsFrom(self
.samdb
)
863 n_rep
.load_fsmo_roles(self
.samdb
)
865 # Loop thru the existing repsFrom tupples (if any)
866 for i
, t_repsFrom
in enumerate(n_rep
.rep_repsFrom
):
868 # for each tuple t in n!repsFrom, let s be the nTDSDSA
869 # object such that s!objectGUID = t.uuidDsa
870 guidstr
= str(t_repsFrom
.source_dsa_obj_guid
)
871 s_dsa
= self
.get_dsa_by_guidstr(guidstr
)
873 # Source dsa is gone from config (strange)
874 # so cleanup stale repsFrom for unlisted DSA
876 logger
.debug("repsFrom source DSA guid (%s) not found" %
878 t_repsFrom
.to_be_deleted
= True
881 s_dnstr
= s_dsa
.dsa_dnstr
883 # Retrieve my DSAs connection object (if it exists)
884 # that specifies the fromServer equivalent to
885 # the DSA that is specified in the repsFrom source
886 cn_conn
= self
.my_dsa
.get_connection_by_from_dnstr(s_dnstr
)
888 # Let (cn) be the nTDSConnection object such that (cn)
889 # is a child of the local DC's nTDSDSA object and
890 # (cn!fromServer = s) and (cn!options) does not contain
891 # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.
892 if cn_conn
and cn_conn
.is_rodc_topology():
895 # KCC removes this repsFrom tuple if any of the following
899 # No NC replica of the NC "is present" on DSA that
900 # would be source of replica
902 # A writable replica of the NC "should be present" on
903 # the local DC, but a partial replica "is present" on
905 s_rep
= s_dsa
.get_current_replica(n_rep
.nc_dnstr
)
907 if cn_conn
is None or \
908 s_rep
is None or not s_rep
.is_present() or \
909 (not n_rep
.is_ro() and s_rep
.is_partial()):
911 t_repsFrom
.to_be_deleted
= True
914 # If the KCC did not remove t from n!repsFrom, it updates t
915 self
.modify_repsFrom(n_rep
, t_repsFrom
, s_rep
, s_dsa
, cn_conn
)
917 # Loop thru connections and add implied repsFrom tuples
918 # for each NTDSConnection under our local DSA if the
919 # repsFrom is not already present
920 for cn_dnstr
, cn_conn
in self
.my_dsa
.connect_table
.items():
922 implied
, s_dsa
= self
.is_repsFrom_implied(n_rep
, cn_conn
)
926 # Loop thru the existing repsFrom tupples (if any) and
927 # if we already have a tuple for this connection then
928 # no need to proceed to add. It will have been changed
929 # to have the correct attributes above
930 for i
, t_repsFrom
in enumerate(n_rep
.rep_repsFrom
):
932 guidstr
= str(t_repsFrom
.source_dsa_obj_guid
)
933 if s_dsa
is self
.get_dsa_by_guidstr(guidstr
):
940 # Create a new RepsFromTo and proceed to modify
941 # it according to specification
942 t_repsFrom
= RepsFromTo(n_rep
.nc_dnstr
)
944 t_repsFrom
.source_dsa_obj_guid
= s_dsa
.dsa_guid
946 s_rep
= s_dsa
.get_current_replica(n_rep
.nc_dnstr
)
948 self
.modify_repsFrom(n_rep
, t_repsFrom
, s_rep
, s_dsa
, cn_conn
)
950 # Add to our NC repsFrom as this is newly computed
951 if t_repsFrom
.is_modified():
952 n_rep
.rep_repsFrom
.append(t_repsFrom
)
955 # Display any to be deleted or modified repsFrom
956 text
= n_rep
.dumpstr_to_be_deleted()
958 logger
.info("TO BE DELETED:\n%s" % text
)
959 text
= n_rep
.dumpstr_to_be_modified()
961 logger
.info("TO BE MODIFIED:\n%s" % text
)
963 # Peform deletion from our tables but perform
964 # no database modification
965 n_rep
.commit_repsFrom(self
.samdb
, ro
=True)
967 # Commit any modified repsFrom to the NC replica
968 n_rep
.commit_repsFrom(self
.samdb
)
970 def keep_connection(self
, cn_conn
):
971 """Determines if the connection is meant to be kept during the
972 pruning of unneeded connections operation.
974 Consults the keep_connection_list[] which was built during
975 intersite NC replica graph computation.
977 ::returns (True or False): if (True) connection should not be pruned
979 if cn_conn
in self
.keep_connection_list
:
983 def merge_failed_links(self
):
984 """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.
985 The KCC on a writable DC attempts to merge the link and connection
986 failure information from bridgehead DCs in its own site to help it
987 identify failed bridgehead DCs.
989 # MS-TECH Ref 6.2.2.3.2 Merge of kCCFailedLinks and kCCFailedLinks
992 # 1. Queries every bridgehead server in your site (other than yourself)
993 # 2. For every ntDSConnection that references a server in a different
994 # site merge all the failure info
996 # XXX - not implemented yet
998 def setup_graph(self
, part
):
999 """Set up a GRAPH, populated with a VERTEX for each site
1000 object, a MULTIEDGE for each siteLink object, and a
1001 MUTLIEDGESET for each siteLinkBridge object (or implied
1004 ::returns: a new graph
1008 g
= IntersiteGraph()
1010 for site_dn
, site
in self
.site_table
.items():
1011 vertex
= Vertex(site
, part
)
1012 vertex
.guid
= site_dn
1013 g
.vertices
.add(vertex
)
1015 if not dn_to_vertex
.get(site_dn
):
1016 dn_to_vertex
[site_dn
] = []
1018 dn_to_vertex
[site_dn
].append(vertex
)
1020 connected_vertices
= set()
1021 for transport_guid
, transport
in self
.transport_table
.items():
1022 # Currently only ever "IP"
1023 for site_link_dn
, site_link
in self
.sitelink_table
.items():
1024 new_edge
= create_edge(transport_guid
, site_link
, dn_to_vertex
)
1025 connected_vertices
.update(new_edge
.vertices
)
1026 g
.edges
.add(new_edge
)
1028 # If 'Bridge all site links' is enabled and Win2k3 bridges required is not set
1029 # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
1030 # No documentation for this however, ntdsapi.h appears to have listed:
1031 # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
1032 if ((self
.my_site
.site_options
& 0x00000002) == 0
1033 and (self
.my_site
.site_options
& 0x00001000) == 0):
1034 g
.edge_set
.add(create_auto_edge_set(g
, transport_guid
))
1036 # TODO get all site link bridges
1037 for site_link_bridge
in []:
1038 g
.edge_set
.add(create_edge_set(g
, transport_guid
,
1041 g
.connected_vertices
= connected_vertices
# NOTE(review): garbled by extraction -- statements are split across lines and
# the numbering jumps (1063 -> 1065 -> 1069), so the branch between the two
# debug messages (presumably "if len(bhs) == 0: ... return None" and the
# final "return bhs[0]") is missing. Restore the canonical text before editing.
#
# Purpose (from the surviving docstring): return the single best bridgehead
# DC for (site, part, transport) by taking the head of the sorted list
# produced by get_all_bridgeheads(), or None when that list is empty.
1045 def get_bridgehead(self
, site
, part
, transport
, partial_ok
, detect_failed
):
1046 """Get a bridghead DC.
1048 :param site: site object representing for which a bridgehead
1050 :param part: crossRef for NC to replicate.
1051 :param transport: interSiteTransport object for replication
1053 :param partial_ok: True if a DC containing a partial
1054 replica or a full replica will suffice, False if only
1055 a full replica will suffice.
1056 :param detect_failed: True to detect failed DCs and route
1057 replication traffic around them, False to assume no DC
1059 ::returns: dsa object for the bridgehead DC or None
1062 bhs
= self
.get_all_bridgeheads(site
, part
, transport
,
1063 partial_ok
, detect_failed
)
1065 logger
.debug("get_bridgehead: exit\n\tsitedn=%s\n\tbhdn=None" %
1069 logger
.debug("get_bridgehead: exit\n\tsitedn=%s\n\tbhdn=%s" %
1070 (site
.site_dnstr
, bhs
[0].dsa_dnstr
))
# NOTE(review): garbled by extraction -- statements split across lines, and
# many interior lines are missing (the "bhs = []" initialization, the
# "continue" statements after each skip-criterion, the try: preceding the
# samdb.search, and the final return). Restore the canonical text before
# making functional changes.
#
# NOTE(review): the visible "except ldb.LdbError, (enum, estr):" uses
# Python 2-only exception syntax; under Python 3 this must become
# "except ldb.LdbError as e:" -- confirm the target interpreter.
#
# Purpose (from the surviving docstring): walk every DSA in the given site
# and collect those eligible to act as bridgeheads for (part, transport),
# applying the MS-TECH skip criteria visible in the comments below, then
# sort deterministically (GC first, then ascending objectGUID) unless
# random bridgehead selection is enabled for the site.
1073 def get_all_bridgeheads(self
, site
, part
, transport
,
1074 partial_ok
, detect_failed
):
1075 """Get all bridghead DCs satisfying the given criteria
1077 :param site: site object representing the site for which
1078 bridgehead DCs are desired.
1079 :param part: partition for NC to replicate.
1080 :param transport: interSiteTransport object for
1081 replication traffic.
1082 :param partial_ok: True if a DC containing a partial
1083 replica or a full replica will suffice, False if
1084 only a full replica will suffice.
1085 :param detect_ok: True to detect failed DCs and route
1086 replication traffic around them, FALSE to assume
1088 ::returns: list of dsa object for available bridgehead
1094 logger
.debug("get_all_bridgeheads: %s" % transport
)
1096 for key
, dsa
in site
.dsa_table
.items():
1098 pdnstr
= dsa
.get_parent_dnstr()
1100 # IF t!bridgeheadServerListBL has one or more values and
1101 # t!bridgeheadServerListBL does not contain a reference
1102 # to the parent object of dc then skip dc
1103 if (len(transport
.bridgehead_list
) != 0 and
1104 pdnstr
not in transport
.bridgehead_list
):
1107 # IF dc is in the same site as the local DC
1108 # IF a replica of cr!nCName is not in the set of NC replicas
1109 # that "should be present" on dc or a partial replica of the
1110 # NC "should be present" but partialReplicasOkay = FALSE
1112 if self
.my_site
.same_site(dsa
):
1113 needed
, ro
, partial
= part
.should_be_present(dsa
)
1114 if not needed
or (partial
and not partial_ok
):
1118 # IF an NC replica of cr!nCName is not in the set of NC
1119 # replicas that "are present" on dc or a partial replica of
1120 # the NC "is present" but partialReplicasOkay = FALSE
1123 rep
= dsa
.get_current_replica(part
.nc_dnstr
)
1124 if rep
is None or (rep
.is_partial() and not partial_ok
):
1127 # IF AmIRODC() and cr!nCName corresponds to default NC then
1128 # Let dsaobj be the nTDSDSA object of the dc
1129 # IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
1131 if self
.my_dsa
.is_ro() and part
.is_default():
1132 if not dsa
.is_minimum_behavior(dsdb
.DS_DOMAIN_FUNCTION_2008
):
1135 # IF t!name != "IP" and the parent object of dc has no value for
1136 # the attribute specified by t!transportAddressAttribute
1138 if transport
.name
!= "IP":
1139 # MS tech specification says we retrieve the named
1140 # attribute in "transportAddressAttribute" from the parent
1143 attrs
= [ transport
.address_attr
]
# NOTE(review): the "try:" that must precede this search (original line
# 1144) is missing from this excerpt.
1145 res
= self
.samdb
.search(base
=pdnstr
, scope
=ldb
.SCOPE_BASE
,
1147 except ldb
.LdbError
, (enum
, estr
):
1151 if transport
.address_attr
not in msg
:
1154 nastr
= str(msg
[transport
.address_attr
][0])
1156 # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
1158 if self
.is_bridgehead_failed(dsa
, detect_failed
):
1161 logger
.debug("get_all_bridgeheads: dsadn=%s" % dsa
.dsa_dnstr
)
1164 # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
1166 # SORT bhs such that all GC servers precede DCs that are not GC
1167 # servers, and otherwise by ascending objectGUID
1169 # SORT bhs in a random order
1170 if site
.is_random_bridgehead_disabled():
1171 bhs
.sort(sort_dsa_by_gc_and_guid
)
def is_bridgehead_failed(self, dsa, detect_failed):
    """Determine whether a given DC is known to be in a failed state.

    :param dsa: dsa object for the DC being tested.
    :param detect_failed: caller's failure-detection preference; returned
        unchanged when neither of the two definite cases below applies.
    ::returns: True if and only if the DC should be considered failed

    NOTE(review): reconstructed from a garbled excerpt -- the two early
    returns were elided by the extraction and have been restored to match
    the surviving comments and the final ``return detect_failed``; confirm
    against the original file.
    """
    # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
    # When DETECT_STALE_DISABLED, we can never know of if it's in a failed state
    if self.my_site.site_options & 0x00000008:
        return False
    elif self.is_stale_link_connection(dsa):
        # A stale link connection is positive evidence of failure.
        return True

    # No definite evidence either way: defer to the caller's preference.
    return detect_failed
# NOTE(review): garbled by extraction -- statements split across physical
# lines, and the embedded numbering jumps throughout (e.g. 1216 -> 1219,
# 1233 -> 1236, 1338 -> 1340), so trailing call arguments, "continue"/"break"
# statements, several "cn.options" bit updates and the readonly/else test
# around commit_connections are missing. Restore the canonical text before
# making functional changes.
#
# Purpose (from the surviving docstring): ensure an nTDSConnection exists
# from a local bridgehead to a remote bridgehead for the given NC, updating
# schedule/notify/two-way-sync/compression option bits on any existing
# generated connection, and creating a new connection only when no valid
# one survives the checks.
1191 def create_connection(self
, part
, rbh
, rsite
, transport
,
1192 lbh
, lsite
, link_opt
, link_sched
,
1193 partial_ok
, detect_failed
):
1194 """Create an nTDSConnection object with the given parameters
1195 if one does not already exist.
1197 :param part: crossRef object for the NC to replicate.
1198 :param rbh: nTDSDSA object for DC to act as the
1199 IDL_DRSGetNCChanges server (which is in a site other
1200 than the local DC's site).
1201 :param rsite: site of the rbh
1202 :param transport: interSiteTransport object for the transport
1203 to use for replication traffic.
1204 :param lbh: nTDSDSA object for DC to act as the
1205 IDL_DRSGetNCChanges client (which is in the local DC's site).
1206 :param lsite: site of the lbh
1207 :param link_opt: Replication parameters (aggregated siteLink options, etc.)
1208 :param link_sched: Schedule specifying the times at which
1209 to begin replicating.
1210 :partial_ok: True if bridgehead DCs containing partial
1211 replicas of the NC are acceptable.
1212 :param detect_failed: True to detect failed DCs and route
1213 replication traffic around them, FALSE to assume no DC
1216 rbhs_all
= self
.get_all_bridgeheads(rsite
, part
, transport
,
1219 # MS-TECH says to compute rbhs_avail but then doesn't use it
1220 # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
1221 # partial_ok, detect_failed)
1223 lbhs_all
= self
.get_all_bridgeheads(lsite
, part
, transport
,
1226 # MS-TECH says to compute lbhs_avail but then doesn't use it
1227 # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
1228 # partial_ok, detect_failed)
1230 # FOR each nTDSConnection object cn such that the parent of cn is
1231 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1232 for ldsa
in lbhs_all
:
1233 for cn
in ldsa
.connect_table
.values():
1236 for rdsa
in rbhs_all
:
1237 if cn
.from_dnstr
== rdsa
.dsa_dnstr
:
1243 # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
1244 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
1245 # cn!transportType references t
1246 if (cn
.is_generated() and not cn
.is_rodc_topology() and
1247 cn
.transport_guid
== transport
.guid
):
1249 # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
1250 # cn!options and cn!schedule != sch
1251 # Perform an originating update to set cn!schedule to
1253 if (not cn
.is_user_owned_schedule() and
1254 not cn
.is_equivalent_schedule(link_sched
)):
1255 cn
.schedule
= link_sched
1256 cn
.set_modified(True)
1258 # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1259 # NTDSCONN_OPT_USE_NOTIFY are set in cn
1260 if cn
.is_override_notify_default() and \
1263 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
1265 # Perform an originating update to clear bits
1266 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1267 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1268 if (link_opt
& dsdb
.NTDSSITELINK_OPT_USE_NOTIFY
) == 0:
# NOTE(review): the left-hand side of this bit-clear (presumably
# "cn.options &=", original line 1269) is missing from this excerpt.
1270 ~
(dsdb
.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1271 dsdb
.NTDSCONN_OPT_USE_NOTIFY
)
1272 cn
.set_modified(True)
1277 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
1279 # Perform an originating update to set bits
1280 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1281 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1282 if (link_opt
& dsdb
.NTDSSITELINK_OPT_USE_NOTIFY
) != 0:
# NOTE(review): the left-hand side of this bit-set (presumably
# "cn.options |=", original line 1283) is missing from this excerpt.
1284 (dsdb
.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1285 dsdb
.NTDSCONN_OPT_USE_NOTIFY
)
1286 cn
.set_modified(True)
1289 # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
1290 if cn
.is_twoway_sync():
1292 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
1294 # Perform an originating update to clear bit
1295 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1296 if (link_opt
& dsdb
.NTDSSITELINK_OPT_TWOWAY_SYNC
) == 0:
1297 cn
.options
&= ~dsdb
.NTDSCONN_OPT_TWOWAY_SYNC
1298 cn
.set_modified(True)
1303 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
1305 # Perform an originating update to set bit
1306 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1307 if (link_opt
& dsdb
.NTDSSITELINK_OPT_TWOWAY_SYNC
) != 0:
1308 cn
.options |
= dsdb
.NTDSCONN_OPT_TWOWAY_SYNC
1309 cn
.set_modified(True)
1312 # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
1314 if cn
.is_intersite_compression_disabled():
1316 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
1318 # Perform an originating update to clear bit
1319 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
# NOTE(review): the start of this condition (presumably
# "if (link_opt &", original line 1321) is missing from this excerpt.
1322 dsdb
.NTDSSITELINK_OPT_DISABLE_COMPRESSION
) == 0:
1324 ~dsdb
.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1325 cn
.set_modified(True)
1329 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1331 # Perform an originating update to set bit
1332 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1335 dsdb
.NTDSSITELINK_OPT_DISABLE_COMPRESSION
) != 0:
1337 dsdb
.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1338 cn
.set_modified(True)
1340 # Display any modified connection
1342 if cn
.to_be_modified
:
1343 logger
.info("TO BE MODIFIED:\n%s" % cn
)
# NOTE(review): the condition choosing between the read-only and
# read-write commit (presumably testing opts.readonly, original
# lines 1344/1346) is missing from this excerpt.
1345 ldsa
.commit_connections(self
.samdb
, ro
=True)
1347 ldsa
.commit_connections(self
.samdb
)
1350 valid_connections
= 0
1352 # FOR each nTDSConnection object cn such that cn!parent is
1353 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1354 for ldsa
in lbhs_all
:
1355 for cn
in ldsa
.connect_table
.values():
1358 for rdsa
in rbhs_all
:
1359 if cn
.from_dnstr
== rdsa
.dsa_dnstr
:
1365 # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
1366 # cn!transportType references t) and
1367 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
1368 if ((not cn
.is_generated() or
1369 cn
.transport_guid
== transport
.guid
) and
1370 not cn
.is_rodc_topology()):
1372 # LET rguid be the objectGUID of the nTDSDSA object
1373 # referenced by cn!fromServer
1374 # LET lguid be (cn!parent)!objectGUID
1376 # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
1377 # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
1378 # Increment cValidConnections by 1
1379 if (not self
.is_bridgehead_failed(rdsa
, detect_failed
) and
1380 not self
.is_bridgehead_failed(ldsa
, detect_failed
)):
1381 valid_connections
+= 1
1383 # IF keepConnections does not contain cn!objectGUID
1384 # APPEND cn!objectGUID to keepConnections
1385 if not self
.keep_connection(cn
):
1386 self
.keep_connection_list
.append(cn
)
1390 # IF cValidConnections = 0
1391 if valid_connections
== 0:
1393 # LET opt be NTDSCONN_OPT_IS_GENERATED
1394 opt
= dsdb
.NTDSCONN_OPT_IS_GENERATED
1396 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
1397 # SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1398 # NTDSCONN_OPT_USE_NOTIFY in opt
1399 if (link_opt
& dsdb
.NTDSSITELINK_OPT_USE_NOTIFY
) != 0:
1400 opt |
= (dsdb
.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1401 dsdb
.NTDSCONN_OPT_USE_NOTIFY
)
1403 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
1404 # SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
1405 if (link_opt
& dsdb
.NTDSSITELINK_OPT_TWOWAY_SYNC
) != 0:
1406 opt |
= dsdb
.NTDSCONN_OPT_TWOWAY_SYNC
1408 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1410 # SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
1412 dsdb
.NTDSSITELINK_OPT_DISABLE_COMPRESSION
) != 0:
1413 opt |
= dsdb
.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1415 # Perform an originating update to create a new nTDSConnection
1416 # object cn that is a child of lbh, cn!enabledConnection = TRUE,
1417 # cn!options = opt, cn!transportType is a reference to t,
1418 # cn!fromServer is a reference to rbh, and cn!schedule = sch
# NOTE(review): the comment above says cn!fromServer should reference rbh,
# yet the call below passes lbh.dsa_dnstr -- verify against the spec; this
# looks like a wrong-argument bug, but this excerpt is too lossy to be sure.
1419 cn
= lbh
.new_connection(opt
, 0, transport
, lbh
.dsa_dnstr
, link_sched
)
1421 # Display any added connection
1424 logger
.info("TO BE ADDED:\n%s" % cn
)
# NOTE(review): as above, the readonly/read-write branch around these two
# commits (original lines 1425/1427) is missing from this excerpt.
1426 lbh
.commit_connections(self
.samdb
, ro
=True)
1428 lbh
.commit_connections(self
.samdb
)
1430 # APPEND cn!objectGUID to keepConnections
1431 if not self
.keep_connection(cn
):
1432 self
.keep_connection_list
.append(cn
)
# NOTE(review): garbled by extraction -- statements split across lines and
# the numbering jumps (1441 -> 1444, 1450 -> 1455), so the continue/skip
# statements and the test on the get_bridgehead() result are missing.
# Restore the canonical text before making functional changes.
#
# Purpose (from the visible code): populate vertex.accept_red_red and
# vertex.accept_black with the transport GUIDs this vertex can accept,
# always appending the synthetic "EDGE_TYPE_ALL" transport so Dijkstra can
# be run a second time over all edge types. No docstring survives.
#
# NOTE(review): "transport != 'IP'" compares a transport OBJECT against a
# string; elsewhere in this file the test is "transport.name != 'IP'" --
# presumably a bug, but too lossy here to fix safely; verify.
1434 def add_transports(self
, vertex
, local_vertex
, graph
, detect_failed
):
1435 vertex
.accept_red_red
= []
1436 vertex
.accept_black
= []
1437 found_failed
= False
1438 for t_guid
, transport
in self
.transport_table
.items():
1439 # FLAG_CR_NTDS_DOMAIN 0x00000002
1440 if (local_vertex
.is_red() and transport
!= "IP" and
1441 vertex
.part
.system_flags
& 0x00000002):
1444 if vertex
in graph
.connected_vertices
:
1447 partial_replica_okay
= vertex
.is_black()
1449 bh
= self
.get_bridgehead(local_vertex
.site
, vertex
.part
, transport
,
1450 partial_replica_okay
, detect_failed
)
1455 vertex
.accept_red_red
.append(t_guid
)
1456 vertex
.accept_black
.append(t_guid
)
1458 # Add additional transport to allow another run of Dijkstra
1459 vertex
.accept_red_red
.append("EDGE_TYPE_ALL")
1460 vertex
.accept_black
.append("EDGE_TYPE_ALL")
# NOTE(review): garbled by extraction -- statements split across lines and
# the numbering jumps (e.g. 1516 -> 1521, 1524 -> 1527, 1546 -> 1548), so the
# partial_ok assignment, the loop over edge_list, several break/continue
# statements and the RODC lbh selection are missing. Restore the canonical
# text before making functional changes.
1464 def create_connections(self
, graph
, part
, detect_failed
):
1465 """Construct an NC replica graph for the NC identified by
1466 the given crossRef, then create any additional nTDSConnection
1469 :param graph: site graph.
1470 :param part: crossRef object for NC.
1471 :param detect_failed: True to detect failed DCs and route
1472 replication traffic around them, False to assume no DC
1475 Modifies self.keep_connection_list by adding any connections
1476 deemed to be "in use".
1478 ::returns: (all_connected, found_failed_dc)
1479 (all_connected) True if the resulting NC replica graph
1480 connects all sites that need to be connected.
1481 (found_failed_dc) True if one or more failed DCs were
1484 all_connected
= True
1485 found_failed
= False
1487 logger
.debug("create_connections(): enter\n\tpartdn=%s\n\tdetect_failed=%s" %
1488 (part
.nc_dnstr
, detect_failed
))
1490 # XXX - This is a highly abbreviated function from the MS-TECH
1491 # ref. It creates connections between bridgeheads to all
1492 # sites that have appropriate replicas. Thus we are not
1493 # creating a minimum cost spanning tree but instead
1494 # producing a fully connected tree. This should produce
1495 # a full (albeit not optimal cost) replication topology.
1497 my_vertex
= Vertex(self
.my_site
, part
)
1498 my_vertex
.color_vertex()
1500 for v
in graph
.vertices
:
1502 self
.add_transports(v
, my_vertex
, graph
, detect_failed
)
1504 # No NC replicas for this NC in the site of the local DC,
1505 # so no nTDSConnection objects need be created
1506 if my_vertex
.is_white():
1507 return all_connected
, found_failed
1509 edge_list
, component_count
= self
.get_spanning_tree_edges(graph
)
1511 if component_count
> 1:
1512 all_connected
= False
1514 # LET partialReplicaOkay be TRUE if and only if
1515 # localSiteVertex.Color = COLOR.BLACK
# NOTE(review): the assignment of partial_ok implied by the comment above
# (original lines 1517-1519) is missing from this excerpt, although
# partial_ok is used below.
1516 if my_vertex
.is_black():
1521 # Utilize the IP transport only for now
1523 for transport
in self
.transport_table
.values():
1524 if transport
.name
== "IP":
1527 if transport
is None:
1528 raise Exception("Unable to find inter-site transport for IP")
# NOTE(review): the "for e in edge_list:" loop header implied by the uses
# of e below (original line ~1530) is missing from this excerpt.
1531 if e
.directed
and e
.vertices
[0].site
is self
.my_site
: # more accurate comparison?
1534 if e
.vertices
[0].site
is self
.my_site
:
1535 rsite
= e
.vertices
[1]
1537 rsite
= e
.vertices
[0]
1539 # We don't make connections to our own site as that
1540 # is intrasite topology generator's job
1541 if rsite
is self
.my_site
:
1544 # Determine bridgehead server in remote site
1545 rbh
= self
.get_bridgehead(rsite
, part
, transport
,
1546 partial_ok
, detect_failed
)
1548 # RODC acts as an BH for itself
1550 # LET lbh be the nTDSDSA object of the local DC
1552 # LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
1553 # cr, t, partialReplicaOkay, detectFailedDCs)
1554 if self
.my_dsa
.is_ro():
1555 lsite
= self
.my_site
1558 lsite
= self
.my_site
1559 lbh
= self
.get_bridgehead(lsite
, part
, transport
,
1560 partial_ok
, detect_failed
)
1562 sitelink
= e
.site_link
1563 if sitelink
is None:
# NOTE(review): the body of this None branch and the else introducing the
# assignments below (original lines 1564-1566) are missing here.
1567 link_opt
= sitelink
.options
1568 link_sched
= sitelink
.schedule
1570 self
.create_connection(part
, rbh
, rsite
, transport
,
1571 lbh
, lsite
, link_opt
, link_sched
,
1572 partial_ok
, detect_failed
)
1574 return all_connected
, found_failed
# NOTE(review): garbled by extraction -- statements split across lines and
# the numbering jumps (1603 -> 1605, 1617 -> 1620), so the "continue"
# statements after the is_enabled()/is_foreign() tests and the test on
# "connected" are missing. Restore the canonical text before editing.
1576 def create_intersite_connections(self
):
1577 """Computes an NC replica graph for each NC replica that "should be
1578 present" on the local DC or "is present" on any DC in the same site
1579 as the local DC. For each edge directed to an NC replica on such a
1580 DC from an NC replica on a DC in another site, the KCC creates an
1581 nTDSConnection object to imply that edge if one does not already
1584 Modifies self.keep_connection_list - A list of nTDSConnection
1585 objects for edges that are directed
1586 to the local DC's site in one or more NC replica graphs.
1588 returns: True if spanning trees were created for all NC replica
1589 graphs, otherwise False.
1591 all_connected
= True
1592 self
.keep_connection_list
= []
1594 # LET crossRefList be the set containing each object o of class
1595 # crossRef such that o is a child of the CN=Partitions child of the
1598 # FOR each crossRef object cr in crossRefList
1599 # IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
1600 # is clear in cr!systemFlags, skip cr.
1601 # LET g be the GRAPH return of SetupGraph()
1603 for part
in self
.part_table
.values():
1605 if not part
.is_enabled():
1608 if part
.is_foreign():
1611 graph
= self
.setup_graph(part
)
1613 # Create nTDSConnection objects, routing replication traffic
1614 # around "failed" DCs.
1615 found_failed
= False
1617 connected
, found_failed
= self
.create_connections(graph
, part
, True)
# NOTE(review): the "if not connected:" test implied by the assignment
# below (original lines 1618-1619) is missing from this excerpt.
1620 all_connected
= False
1623 # One or more failed DCs preclude use of the ideal NC
1624 # replica graph. Add connections for the ideal graph.
1625 self
.create_connections(graph
, part
, False)
1627 return all_connected
# NOTE(review): garbled by extraction -- statements split across lines and
# the numbering jumps (1633 -> 1635, 1637 -> 1640, 1671 -> 1675), so vertex
# bookkeeping inside the loops is missing. Restore the canonical text
# before making functional changes. No docstring survives.
#
# Purpose (from the visible comments): three-phase spanning-tree
# computation -- (1) Dijkstra from red seeds then red+black seeds per edge
# set to collect internal candidate edges, (2) Kruskal over those internal
# edges, (3) a final all-edge-type Dijkstra to record each vertex's
# distance to the nearest red vertex before copying out the result.
1629 def get_spanning_tree_edges(self
, graph
):
1630 # Phase 1: Run Dijkstra's to get a list of internal edges, which are
1631 # just the shortest-paths connecting colored vertices
1633 internal_edges
= set()
1635 for e_set
in graph
.edge_set
:
1637 for v
in graph
.vertices
:
1640 # All con_type in an edge set is the same
1641 for e
in e_set
.edges
:
1642 edgeType
= e
.con_type
1643 for v
in e
.vertices
:
1646 # Run dijkstra's algorithm with just the red vertices as seeds
1647 # Seed from the full replicas
1648 dijkstra(graph
, edgeType
, False)
1651 process_edge_set(graph
, e_set
, internal_edges
)
1653 # Run dijkstra's algorithm with red and black vertices as the seeds
1654 # Seed from both full and partial replicas
1655 dijkstra(graph
, edgeType
, True)
1658 process_edge_set(graph
, e_set
, internal_edges
)
1660 # All vertices have root/component as itself
1661 setup_vertices(graph
)
1662 process_edge_set(graph
, None, internal_edges
)
1664 # Phase 2: Run Kruskal's on the internal edges
1665 output_edges
, components
= kruskal(graph
, internal_edges
)
1667 # This recalculates the cost for the path connecting the closest red vertex
1668 # Ignoring types is fine because NO suboptimal edge should exist in the graph
1669 dijkstra(graph
, "EDGE_TYPE_ALL", False) # TODO rename
1670 # Phase 3: Process the output
1671 for v
in graph
.vertices
:
1675 v
.dist_to_red
= v
.repl_info
.cost
1677 # count the components
1678 return self
.copy_output_edges(graph
, output_edges
), components
1680 # This ensures only one-way connections for partial-replicas
# NOTE(review): garbled by extraction -- statements split across lines and
# the numbering jumps (1683 -> 1685, 1692 -> 1694, 1696 -> 1698), so the
# "edge_list = []" initialization and the final "return edge_list" are
# missing. Restore the canonical text before making functional changes.
# No docstring survives.
#
# Purpose (from the visible code): copy spanning-tree output edges that
# touch the local site, marking an edge directed (one-way) when an
# endpoint is black (partial replica) and reachable from red, and
# orienting it so the endpoint closer to a red vertex comes first.
1681 def copy_output_edges(self
, graph
, output_edges
):
1683 vid
= self
.my_site
# object guid for the local dc's site
1685 for edge
in output_edges
:
1686 # Three-way edges are no problem here since these were created by
1687 # add_out_edge which only has two endpoints
1688 v
= edge
.vertices
[0]
1689 w
= edge
.vertices
[1]
1690 if v
.site
is vid
or w
.site
is vid
:
1691 if (v
.is_black() or w
.is_black()) and not v
.dist_to_red
== 2 ** 32 - 1:
1692 edge
.directed
= True
1694 if w
.dist_to_red
< v
.dist_to_red
:
1695 edge
.vertices
[0] = w
1696 edge
.vertices
[1] = v
1698 edge_list
.append(edge
)
# NOTE(review): garbled by extraction -- statements split across lines and
# the numbering jumps (1711 -> 1716, 1721 -> 1723), so the "mydsa = ..."
# assignment and the readonly/read-write branch around select_istg() are
# missing. Restore the canonical text before making functional changes.
1702 def intersite(self
):
1703 """The head method for generating the inter-site KCC replica
1704 connection graph and attendant nTDSConnection objects
1707 Produces self.keep_connection_list[] of NTDS Connections
1708 that should be kept during subsequent pruning process.
1710 ::return (True or False): (True) if the produced NC replica
1711 graph connects all sites that need to be connected
1716 mysite
= self
.my_site
1717 all_connected
= True
1719 logger
.debug("intersite(): enter")
1721 # Determine who is the ISTG
# NOTE(review): the condition selecting between these two select_istg()
# calls (presumably testing opts.readonly, original lines 1722/1724) is
# missing from this excerpt, as is the definition of mydsa.
1723 mysite
.select_istg(self
.samdb
, mydsa
, ro
=True)
1725 mysite
.select_istg(self
.samdb
, mydsa
, ro
=False)
1727 # Test whether local site has topology disabled
1728 if mysite
.is_intersite_topology_disabled():
1729 logger
.debug("intersite(): exit disabled all_connected=%d" %
1731 return all_connected
1733 if not mydsa
.is_istg():
1734 logger
.debug("intersite(): exit not istg all_connected=%d" %
1736 return all_connected
1738 self
.merge_failed_links()
1740 # For each NC with an NC replica that "should be present" on the
1741 # local DC or "is present" on any DC in the same site as the
1742 # local DC, the KCC constructs a site graph--a precursor to an NC
1743 # replica graph. The site connectivity for a site graph is defined
1744 # by objects of class interSiteTransport, siteLink, and
1745 # siteLinkBridge in the config NC.
1747 all_connected
= self
.create_intersite_connections()
1749 logger
.debug("intersite(): exit all_connected=%d" % all_connected
)
1750 return all_connected
# NOTE(review): garbled by extraction -- statements split across lines and
# the numbering jumps (1767 -> 1771, 1773 -> 1777), so the early return when
# not an RODC, the "cn2 = con" capture plus break in the first loop, and
# any guard for cn2 being unset are missing. Restore the canonical text
# before making functional changes.
#
# NOTE(review): the final commit references the module-global "opts"
# (command-line options) for its readonly flag -- this method depends on
# script-level state, not only on self.
1752 def update_rodc_connection(self
):
1753 """Runs when the local DC is an RODC and updates the RODC NTFRS
1756 # Given an nTDSConnection object cn1, such that cn1.options contains
1757 # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2,
1758 # does not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure
1759 # that the following is true:
1761 # cn1.fromServer = cn2.fromServer
1762 # cn1.schedule = cn2.schedule
1764 # If no such cn2 can be found, cn1 is not modified.
1765 # If no such cn1 can be found, nothing is modified by this task.
1767 if not self
.my_dsa
.is_ro():
1771 # Find cn2 - the DRS NTDSConnection
1772 for con
in self
.my_dsa
.connect_table
.values():
1773 if not con
.is_rodc_topology():
1777 # Find cn1 - the FRS NTDSConnection
1779 for con
in self
.my_dsa
.connect_table
.values():
1780 if con
.is_rodc_topology():
1781 con
.from_dnstr
= cn2
.from_dnstr
1782 con
.schedule
= cn2
.schedule
1783 con
.to_be_modified
= True
1785 # Commit changes to the database
1786 self
.my_dsa
.commit_connections(self
.samdb
, ro
=opts
.readonly
)
def intrasite_max_node_edges(self, node_count):
    """Returns the maximum number of edges directed to a node in
    the intrasite replica graph.

    The KCC does not create more
    than 50 edges directed to a single DC. To optimize replication,
    we compute that each node should have n+2 total edges directed
    to it such that (n) is the smallest non-negative integer
    satisfying (node_count <= 2*(n*n) + 6*n + 7)

    :param node_count: total number of nodes in the replica graph

    NOTE(review): reconstructed from a garbled excerpt -- the search loop,
    the +2 adjustment and the 50-edge cap were elided by the extraction
    and have been restored from the formula stated above; confirm against
    the original file.
    """
    # Find the smallest non-negative n satisfying the docstring's formula.
    n = 0
    while node_count > (2 * (n * n) + (6 * n) + 7):
        n = n + 1
    # Each node gets n+2 edges, but never more than the hard cap of 50.
    n = n + 2
    if n < 50:
        return n
    return 50
# NOTE(review): garbled by extraction -- statements split across physical
# lines and the embedded numbering jumps throughout (e.g. 1831 -> 1833,
# 1856 -> 1858, 1874 -> 1876, 2001 -> 2003), so the early return when the
# replica is not needed, the "r_list = []" initialization, the writability
# test on f_of_x, every "continue" after a skip-criterion, the de-dup of
# r_list, the index-loop headers and increments, and the final graph-node
# loop bounds are all missing. Restore the canonical text before making
# functional changes. No formal docstring survives; the MS-notation
# explanation below serves that role.
1810 def construct_intrasite_graph(self
, site_local
, dc_local
,
1811 nc_x
, gc_only
, detect_stale
):
1813 # We're using the MS notation names here to allow
1814 # correlation back to the published algorithm.
1816 # nc_x - naming context (x) that we are testing if it
1817 # "should be present" on the local DC
1818 # f_of_x - replica (f) found on a DC (s) for NC (x)
1819 # dc_s - DC where f_of_x replica was found
1820 # dc_local - local DC that potentially needs a replica
1822 # r_list - replica list R
1823 # p_of_x - replica (p) is partial and found on a DC (s)
1825 # l_of_x - replica (l) is the local replica for NC (x)
1826 # that should appear on the local DC
1827 # r_len = is length of replica list |R|
1829 # If the DSA doesn't need a replica for this
1830 # partition (NC x) then continue
1831 needed
, ro
, partial
= nc_x
.should_be_present(dc_local
)
1833 logger
.debug("construct_intrasite_graph(): enter" +
1834 "\n\tgc_only=%d" % gc_only
+
1835 "\n\tdetect_stale=%d" % detect_stale
+
1836 "\n\tneeded=%s" % needed
+
1838 "\n\tpartial=%s" % partial
+
1844 # Create a NCReplica that matches what the local replica
1845 # should say. We'll use this below in our r_list
1846 l_of_x
= NCReplica(dc_local
.dsa_dnstr
, dc_local
.dsa_guid
,
1849 l_of_x
.identify_by_basedn(self
.samdb
)
1851 l_of_x
.rep_partial
= partial
1854 # Add this replica that "should be present" to the
1855 # needed replica table for this DSA
1856 dc_local
.add_needed_replica(l_of_x
)
1858 # Empty replica sequence list
# NOTE(review): the "r_list = []" assignment implied by the comment above
# (original line 1859) is missing from this excerpt.
1861 # We'll loop thru all the DSAs looking for
1862 # writeable NC replicas that match the naming
1863 # context dn for (nc_x)
1865 for dc_s_dn
, dc_s
in self
.my_site
.dsa_table
.items():
1867 # If this partition (nc_x) doesn't appear as a
1868 # replica (f_of_x) on (dc_s) then continue
1869 if not nc_x
.nc_dnstr
in dc_s
.current_rep_table
.keys():
1872 # Pull out the NCReplica (f) of (x) with the dn
1873 # that matches NC (x) we are examining.
1874 f_of_x
= dc_s
.current_rep_table
[nc_x
.nc_dnstr
]
1876 # Replica (f) of NC (x) must be writable
# NOTE(review): the writability test implied by the comment above
# (original lines 1877-1878) is missing from this excerpt.
1880 # Replica (f) of NC (x) must satisfy the
1881 # "is present" criteria for DC (s) that
1883 if not f_of_x
.is_present():
1886 # DC (s) must be a writable DSA other than
1887 # my local DC. In other words we'd only replicate
1888 # from other writable DC
1889 if dc_s
.is_ro() or dc_s
is dc_local
:
1892 # Certain replica graphs are produced only
1893 # for global catalogs, so test against
1894 # method input parameter
1895 if gc_only
and not dc_s
.is_gc():
1898 # DC (s) must be in the same site as the local DC
1899 # as this is the intra-site algorithm. This is
1900 # handled by virtue of placing DSAs in per
1901 # site objects (see enclosing for() loop)
1903 # If NC (x) is intended to be read-only full replica
1904 # for a domain NC on the target DC then the source
1905 # DC should have functional level at minimum WIN2008
1907 # Effectively we're saying that in order to replicate
1908 # to a targeted RODC (which was introduced in Windows 2008)
1909 # then we have to replicate from a DC that is also minimally
1912 # You can also see this requirement in the MS special
1913 # considerations for RODC which state that to deploy
1914 # an RODC, at least one writable domain controller in
1915 # the domain must be running Windows Server 2008
1916 if ro
and not partial
and nc_x
.nc_type
== NCType
.domain
:
1917 if not dc_s
.is_minimum_behavior(dsdb
.DS_DOMAIN_FUNCTION_2008
):
1920 # If we haven't been told to turn off stale connection
1921 # detection and this dsa has a stale connection then
1923 if detect_stale
and self
.is_stale_link_connection(dc_s
):
1926 # Replica meets criteria. Add it to table indexed
1927 # by the GUID of the DC that it appears on
1928 r_list
.append(f_of_x
)
1930 # If a partial (not full) replica of NC (x) "should be present"
1931 # on the local DC, append to R each partial replica (p of x)
1932 # such that p "is present" on a DC satisfying the same
1933 # criteria defined above for full replica DCs.
1936 # Now we loop thru all the DSAs looking for
1937 # partial NC replicas that match the naming
1938 # context dn for (NC x)
1939 for dc_s_dn
, dc_s
in self
.my_site
.dsa_table
.items():
1941 # If this partition NC (x) doesn't appear as a
1942 # replica (p) of NC (x) on the dsa DC (s) then
1944 if not nc_x
.nc_dnstr
in dc_s
.current_rep_table
.keys():
1947 # Pull out the NCReplica with the dn that
1948 # matches NC (x) we are examining.
1949 p_of_x
= dc_s
.current_rep_table
[nc_x
.nc_dnstr
]
1951 # Replica (p) of NC (x) must be partial
1952 if not p_of_x
.is_partial():
1955 # Replica (p) of NC (x) must satisfy the
1956 # "is present" criteria for DC (s) that
1958 if not p_of_x
.is_present():
1961 # DC (s) must be a writable DSA other than
1962 # my DSA. In other words we'd only replicate
1963 # from other writable DSA
1964 if dc_s
.is_ro() or dc_s
is dc_local
:
1967 # Certain replica graphs are produced only
1968 # for global catalogs, so test against
1969 # method input parameter
1970 if gc_only
and not dc_s
.is_gc():
1973 # DC (s) must be in the same site as the local DC
1974 # as this is the intra-site algorithm. This is
1975 # handled by virtue of placing DSAs in per
1976 # site objects (see enclosing for() loop)
1978 # This criteria is moot (a no-op) for this case
1979 # because we are scanning for (partial = True). The
1980 # MS algorithm statement says partial replica scans
1981 # should adhere to the "same" criteria as full replica
1982 # scans so the criteria doesn't change here...its just
1983 # rendered pointless.
1985 # The case that is occurring would be a partial domain
1986 # replica is needed on a local DC global catalog. There
1987 # is no minimum windows behavior for those since GCs
1988 # have always been present.
1989 if ro
and not partial
and nc_x
.nc_type
== NCType
.domain
:
1990 if not dc_s
.is_minimum_behavior(dsdb
.DS_DOMAIN_FUNCTION_2008
):
1993 # If we haven't been told to turn off stale connection
1994 # detection and this dsa has a stale connection then
1996 if detect_stale
and self
.is_stale_link_connection(dc_s
):
1999 # Replica meets criteria. Add it to table indexed
2000 # by the GUID of the DSA that it appears on
2001 r_list
.append(p_of_x
)
2003 # Append to R the NC replica that "should be present"
2005 r_list
.append(l_of_x
)
2007 r_list
.sort(sort_replica_by_dsa_guid
)
2011 max_node_edges
= self
.intrasite_max_node_edges(r_len
)
2013 # Add a node for each r_list element to the replica graph
# NOTE(review): the "graph_list = []" initialization and the "for rep in
# r_list:" loop header implied below (original lines 2014-2015) are
# missing from this excerpt, as is the assignment of r_len.
2016 node
= GraphNode(rep
.rep_dsa_dnstr
, max_node_edges
)
2017 graph_list
.append(node
)
2019 # For each r(i) from (0 <= i < |R|-1)
2021 while i
< (r_len
-1):
2022 # Add an edge from r(i) to r(i+1) if r(i) is a full
2023 # replica or r(i+1) is a partial replica
2024 if not r_list
[i
].is_partial() or r_list
[i
+1].is_partial():
2025 graph_list
[i
+1].add_edge_from(r_list
[i
].rep_dsa_dnstr
)
2027 # Add an edge from r(i+1) to r(i) if r(i+1) is a full
2028 # replica or ri is a partial replica.
2029 if not r_list
[i
+1].is_partial() or r_list
[i
].is_partial():
2030 graph_list
[i
].add_edge_from(r_list
[i
+1].rep_dsa_dnstr
)
2033 # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
2034 # or r0 is a partial replica.
2035 if not r_list
[r_len
-1].is_partial() or r_list
[0].is_partial():
2036 graph_list
[0].add_edge_from(r_list
[r_len
-1].rep_dsa_dnstr
)
2038 # Add an edge from r0 to r|R|-1 if r0 is a full replica or
2039 # r|R|-1 is a partial replica.
2040 if not r_list
[0].is_partial() or r_list
[r_len
-1].is_partial():
2041 graph_list
[r_len
-1].add_edge_from(r_list
[0].rep_dsa_dnstr
)
2043 # For each existing nTDSConnection object implying an edge
2044 # from rj of R to ri such that j != i, an edge from rj to ri
2045 # is not already in the graph, and the total edges directed
2046 # to ri is less than n+2, the KCC adds that edge to the graph.
2049 dsa
= self
.my_site
.dsa_table
[graph_list
[i
].dsa_dnstr
]
2050 graph_list
[i
].add_edges_from_connections(dsa
)
2055 tnode
= graph_list
[i
]
2057 # To optimize replication latency in sites with many NC replicas, the
2058 # KCC adds new edges directed to ri to bring the total edges to n+2,
2059 # where the NC replica rk of R from which the edge is directed
2060 # is chosen at random such that k != i and an edge from rk to ri
2061 # is not already in the graph.
2063 # Note that the KCC tech ref does not give a number for the definition
2064 # of "sites with many NC replicas". At a bare minimum to satisfy
2065 # n+2 edges directed at a node we have to have at least three replicas
2066 # in |R| (i.e. if n is zero then at least replicas from two other graph
2067 # nodes may direct edges to us).
2069 # pick a random index
2070 findex
= rindex
= random
.randint(0, r_len
-1)
2072 # while this node doesn't have sufficient edges
2073 while not tnode
.has_sufficient_edges():
2074 # If this edge can be successfully added (i.e. not
2075 # the same node and edge doesn't already exist) then
2076 # select a new random index for the next round
2077 if tnode
.add_edge_from(graph_list
[rindex
].dsa_dnstr
):
2078 findex
= rindex
= random
.randint(0, r_len
-1)
2080 # Otherwise continue looking against each node
2081 # after the random selection
2086 if rindex
== findex
:
2087 logger
.error("Unable to satisfy max edge criteria!")
2090 # Print the graph node in debug mode
2091 logger
.debug("%s" % tnode
)
2093 # For each edge directed to the local DC, ensure a nTDSConnection
2094 # points to us that satisfies the KCC criteria
2095 if graph_list
[i
].dsa_dnstr
== dc_local
.dsa_dnstr
:
2096 graph_list
[i
].add_connections_from_edges(dc_local
)
2100 def intrasite(self
):
2101 """The head method for generating the intra-site KCC replica
2102 connection graph and attendant nTDSConnection objects
2108 logger
.debug("intrasite(): enter")
2110 # Test whether local site has topology disabled
2111 mysite
= self
.site_table
[self
.my_site_dnstr
]
2112 if mysite
.is_intrasite_topology_disabled():
2115 detect_stale
= (not mysite
.is_detect_stale_disabled())
2117 # Loop thru all the partitions.
2118 for partdn
, part
in self
.part_table
.items():
2119 self
.construct_intrasite_graph(mysite
, mydsa
, part
, False,
2122 # If the DC is a GC server, the KCC constructs an additional NC
2123 # replica graph (and creates nTDSConnection objects) for the
2124 # config NC as above, except that only NC replicas that "are present"
2125 # on GC servers are added to R.
2126 for partdn
, part
in self
.part_table
.items():
2127 if part
.is_config():
2128 self
.construct_intrasite_graph(mysite
, mydsa
, part
, True,
2131 # The DC repeats the NC replica graph computation and nTDSConnection
2132 # creation for each of the NC replica graphs, this time assuming
2133 # that no DC has failed. It does so by re-executing the steps as
2134 # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
2135 # set in the options attribute of the site settings object for
2136 # the local DC's site. (ie. we set "detec_stale" flag to False)
2138 # Loop thru all the partitions.
2139 for partdn
, part
in self
.part_table
.items():
2140 self
.construct_intrasite_graph(mysite
, mydsa
, part
, False,
2141 False) # don't detect stale
2143 # If the DC is a GC server, the KCC constructs an additional NC
2144 # replica graph (and creates nTDSConnection objects) for the
2145 # config NC as above, except that only NC replicas that "are present"
2146 # on GC servers are added to R.
2147 for partdn
, part
in self
.part_table
.items():
2148 if part
.is_config():
2149 self
.construct_intrasite_graph(mysite
, mydsa
, part
, True,
2150 False) # don't detect stale
2153 # Display any to be added or modified repsFrom
2154 for dnstr
, connect
in mydsa
.connect_table
.items():
2155 if connect
.to_be_deleted
:
2156 logger
.info("TO BE DELETED:\n%s" % connect
)
2157 if connect
.to_be_modified
:
2158 logger
.info("TO BE MODIFIED:\n%s" % connect
)
2159 if connect
.to_be_added
:
2160 logger
.info("TO BE ADDED:\n%s" % connect
)
2162 mydsa
.commit_connections(self
.samdb
, ro
=True)
2164 # Commit any newly created connections to the samdb
2165 mydsa
.commit_connections(self
.samdb
)
2167 def run(self
, dburl
, lp
, creds
):
2168 """Method to perform a complete run of the KCC and
2169 produce an updated topology for subsequent NC replica
2170 syncronization between domain controllers
2172 # We may already have a samdb setup if we are
2173 # currently importing an ldif for a test run
2174 if self
.samdb
is None:
2176 self
.samdb
= SamDB(url
=dburl
,
2177 session_info
=system_session(),
2178 credentials
=creds
, lp
=lp
)
2180 except ldb
.LdbError
, (num
, msg
):
2181 logger
.error("Unable to open sam database %s : %s" %
2190 self
.load_all_sites()
2191 self
.load_all_partitions()
2192 self
.load_all_transports()
2193 self
.load_all_sitelinks()
2195 # These are the published steps (in order) for the
2196 # MS-TECH description of the KCC algorithm
2199 self
.refresh_failed_links_connections()
2205 all_connected
= self
.intersite()
2208 self
.remove_unneeded_ntdsconn(all_connected
)
2211 self
.translate_ntdsconn()
2214 self
.remove_unneeded_failed_links_connections()
2217 self
.update_rodc_connection()
2223 def import_ldif(self
, dburl
, lp
, creds
, ldif_file
):
2224 """Routine to import all objects and attributes that are relevent
2225 to the KCC algorithms from a previously exported LDIF file.
2227 The point of this function is to allow a programmer/debugger to
2228 import an LDIF file with non-security relevent information that
2229 was previously extracted from a DC database. The LDIF file is used
2230 to create a temporary abbreviated database. The KCC algorithm can
2231 then run against this abbreviated database for debug or test
2232 verification that the topology generated is computationally the
2233 same between different OSes and algorithms.
2235 :param dburl: path to the temporary abbreviated db to create
2236 :param ldif_file: path to the ldif file to import
2238 if os
.path
.exists(dburl
):
2239 logger
.error("Specify a database (%s) that doesn't already exist." %
2243 # Use ["modules:"] as we are attempting to build a sam
2244 # database as opposed to start it here.
2245 self
.samdb
= Ldb(url
=dburl
, session_info
=system_session(),
2246 lp
=lp
, options
=["modules:"])
2248 self
.samdb
.transaction_start()
2250 data
= read_and_sub_file(ldif_file
, None)
2251 self
.samdb
.add_ldif(data
, None)
2253 except Exception, estr
:
2254 logger
.error("%s" % estr
)
2255 self
.samdb
.transaction_cancel()
2258 self
.samdb
.transaction_commit()
2262 # We have an abbreviated list of options here because we have built
2263 # an abbreviated database. We use the rootdse and extended-dn
2264 # modules only during this re-open
2265 self
.samdb
= SamDB(url
=dburl
, session_info
=system_session(),
2266 credentials
=creds
, lp
=lp
,
2267 options
=["modules:rootdse,extended_dn_out_ldb"])
2270 def export_ldif(self
, dburl
, lp
, creds
, ldif_file
):
2271 """Routine to extract all objects and attributes that are relevent
2272 to the KCC algorithms from a DC database.
2274 The point of this function is to allow a programmer/debugger to
2275 extract an LDIF file with non-security relevent information from
2276 a DC database. The LDIF file can then be used to "import" via
2277 the import_ldif() function this file into a temporary abbreviated
2278 database. The KCC algorithm can then run against this abbreviated
2279 database for debug or test verification that the topology generated
2280 is computationally the same between different OSes and algorithms.
2282 :param dburl: LDAP database URL to extract info from
2283 :param ldif_file: output LDIF file name to create
2286 self
.samdb
= SamDB(url
=dburl
,
2287 session_info
=system_session(),
2288 credentials
=creds
, lp
=lp
)
2289 except ldb
.LdbError
, (enum
, estr
):
2290 logger
.error("Unable to open sam database (%s) : %s" %
2294 if os
.path
.exists(ldif_file
):
2295 logger
.error("Specify a file (%s) that doesn't already exist." %
2300 f
= open(ldif_file
, "w")
2301 except IOError as ioerr
:
2302 logger
.error("Unable to open (%s) : %s" % (ldif_file
, str(ioerr
)))
2307 attrs
= [ "objectClass",
2316 "msDS-NC-Replica-Locations",
2317 "msDS-NC-RO-Replica-Locations" ]
2319 sstr
= "CN=Partitions,%s" % self
.samdb
.get_config_basedn()
2320 res
= self
.samdb
.search(base
=sstr
, scope
=ldb
.SCOPE_SUBTREE
,
2322 expression
="(objectClass=crossRef)")
2324 # Write partitions output
2325 write_search_result(self
.samdb
, f
, res
)
2327 # Query cross reference container
2328 attrs
= [ "objectClass",
2334 "msDS-Behavior-Version",
2335 "msDS-EnabledFeature" ]
2337 sstr
= "CN=Partitions,%s" % self
.samdb
.get_config_basedn()
2338 res
= self
.samdb
.search(base
=sstr
, scope
=ldb
.SCOPE_SUBTREE
,
2340 expression
="(objectClass=crossRefContainer)")
2342 # Write cross reference container output
2343 write_search_result(self
.samdb
, f
, res
)
2346 attrs
= [ "objectClass",
2352 sstr
= "CN=Sites,%s" % self
.samdb
.get_config_basedn()
2353 sites
= self
.samdb
.search(base
=sstr
, scope
=ldb
.SCOPE_SUBTREE
,
2355 expression
="(objectClass=site)")
2357 # Write sites output
2358 write_search_result(self
.samdb
, f
, sites
)
2360 # Query NTDS Site Settings
2362 sitestr
= str(msg
.dn
)
2364 attrs
= [ "objectClass",
2368 "interSiteTopologyGenerator",
2369 "interSiteTopologyFailover",
2373 sstr
= "CN=NTDS Site Settings,%s" % sitestr
2374 res
= self
.samdb
.search(base
=sstr
, scope
=ldb
.SCOPE_BASE
,
2377 # Write Site Settings output
2378 write_search_result(self
.samdb
, f
, res
)
2380 # Naming context list
2383 # Query Directory Service Agents
2387 ncattrs
= [ "hasMasterNCs",
2388 "msDS-hasMasterNCs",
2389 "hasPartialReplicaNCs",
2390 "msDS-HasDomainNCs",
2391 "msDS-hasFullReplicaNCs",
2392 "msDS-HasInstantiatedNCs" ]
2393 attrs
= [ "objectClass",
2400 "msDS-Behavior-Version" ]
2402 res
= self
.samdb
.search(base
=sstr
, scope
=ldb
.SCOPE_SUBTREE
,
2403 attrs
=attrs
+ ncattrs
,
2404 expression
="(objectClass=nTDSDSA)")
2406 # Spin thru all the DSAs looking for NC replicas
2407 # and build a list of all possible Naming Contexts
2408 # for subsequent retrieval below
2410 for k
in msg
.keys():
2412 for value
in msg
[k
]:
2413 # Some of these have binary DNs so
2414 # use dsdb_Dn to split out relevent parts
2415 dsdn
= dsdb_Dn(self
.samdb
, value
)
2416 dnstr
= str(dsdn
.dn
)
2417 if dnstr
not in nclist
:
2418 nclist
.append(dnstr
)
2421 write_search_result(self
.samdb
, f
, res
)
2423 # Query NTDS Connections
2427 attrs
= [ "objectClass",
2433 "enabledConnection",
2439 res
= self
.samdb
.search(base
=sstr
, scope
=ldb
.SCOPE_SUBTREE
,
2441 expression
="(objectClass=nTDSConnection)")
2442 # Write NTDS Connection output
2443 write_search_result(self
.samdb
, f
, res
)
2446 # Query Intersite transports
2447 attrs
= [ "objectClass",
2453 "bridgeheadServerListBL",
2454 "transportAddressAttribute" ]
2456 sstr
= "CN=Inter-Site Transports,CN=Sites,%s" % \
2457 self
.samdb
.get_config_basedn()
2458 res
= self
.samdb
.search(sstr
, scope
=ldb
.SCOPE_SUBTREE
,
2460 expression
="(objectClass=interSiteTransport)")
2462 # Write inter-site transport output
2463 write_search_result(self
.samdb
, f
, res
)
2466 attrs
= [ "objectClass",
2477 sstr
= "CN=Sites,%s" % \
2478 self
.samdb
.get_config_basedn()
2479 res
= self
.samdb
.search(sstr
, scope
=ldb
.SCOPE_SUBTREE
,
2481 expression
="(objectClass=siteLink)")
2483 # Write siteLink output
2484 write_search_result(self
.samdb
, f
, res
)
2486 # Query siteLinkBridge
2487 attrs
= [ "objectClass",
2493 sstr
= "CN=Sites,%s" % self
.samdb
.get_config_basedn()
2494 res
= self
.samdb
.search(sstr
, scope
=ldb
.SCOPE_SUBTREE
,
2496 expression
="(objectClass=siteLinkBridge)")
2498 # Write siteLinkBridge output
2499 write_search_result(self
.samdb
, f
, res
)
2501 # Query servers containers
2502 # Needed for samdb.server_site_name()
2503 attrs
= [ "objectClass",
2509 sstr
= "CN=Sites,%s" % self
.samdb
.get_config_basedn()
2510 res
= self
.samdb
.search(sstr
, scope
=ldb
.SCOPE_SUBTREE
,
2512 expression
="(objectClass=serversContainer)")
2514 # Write servers container output
2515 write_search_result(self
.samdb
, f
, res
)
2518 # Needed because some transport interfaces refer back to
2519 # attributes found in the server object. Also needed
2520 # so extended-dn will be happy with dsServiceName in rootDSE
2521 attrs
= [ "objectClass",
2529 sstr
= "CN=Sites,%s" % self
.samdb
.get_config_basedn()
2530 res
= self
.samdb
.search(sstr
, scope
=ldb
.SCOPE_SUBTREE
,
2532 expression
="(objectClass=server)")
2534 # Write server output
2535 write_search_result(self
.samdb
, f
, res
)
2537 # Query Naming Context replicas
2538 attrs
= [ "objectClass",
2544 "msDS-Behavior-Version",
2549 res
= self
.samdb
.search(sstr
, scope
=ldb
.SCOPE_BASE
,
2552 # Write naming context output
2553 write_search_result(self
.samdb
, f
, res
)
2555 # Query rootDSE replicas
2556 attrs
=[ "objectClass",
2560 "rootDomainNamingContext",
2561 "configurationNamingContext",
2562 "schemaNamingContext",
2563 "defaultNamingContext",
2567 res
= self
.samdb
.search(sstr
, scope
=ldb
.SCOPE_BASE
,
2570 # Record the rootDSE object as a dn as it
2571 # would appear in the base ldb file. We have
2572 # to save it this way because we are going to
2573 # be importing as an abbreviated database.
2574 res
[0].dn
= ldb
.Dn(self
.samdb
, "@ROOTDSE")
2576 # Write rootdse output
2577 write_search_result(self
.samdb
, f
, res
)
2579 except ldb
.LdbError
, (enum
, estr
):
2580 logger
.error("Error processing (%s) : %s" % (sstr
, estr
))
2586 ##################################################
2588 ##################################################
def sort_replica_by_dsa_guid(rep1, rep2):
    """Comparator ordering NC replicas by their DSA guid.

    Returns -1, 0 or +1 like the old builtin cmp(); cmp() itself was
    removed in Python 3, so the portable (a > b) - (a < b) idiom is used.
    """
    g1 = rep1.rep_dsa_guid
    g2 = rep2.rep_dsa_guid
    return (g1 > g2) - (g1 < g2)
def sort_dsa_by_gc_and_guid(dsa1, dsa2):
    """Comparator ordering DSAs with GC servers first, then by DSA guid.

    Returns -1, 0 or +1 like the old builtin cmp(); cmp() itself was
    removed in Python 3, so the portable (a > b) - (a < b) idiom is used.
    """
    # GC servers sort ahead of non-GC servers.
    if dsa1.is_gc() and not dsa2.is_gc():
        return -1
    if not dsa1.is_gc() and dsa2.is_gc():
        return +1
    # Same GC status: tie-break on the guid.
    g1 = dsa1.dsa_guid
    g2 = dsa2.dsa_guid
    return (g1 > g2) - (g1 < g2)
def is_smtp_replication_available():
    """Return False: Samba doesn't implement SMTP transfer for NC
    changes between DCs, so SMTP replication is never available.
    """
    return False
def write_search_result(samdb, f, res):
    """Write every message in search result *res* to open file *f* as LDIF.

    :param samdb: database handle used to render the LDIF text
    :param f: writable file object
    :param res: iterable of ldb messages
    """
    for msg in res:
        lstr = samdb.write_ldif(msg, ldb.CHANGETYPE_NONE)
        f.write("%s" % lstr)
def create_edge(con_type, site_link, dn_to_vertex):
    """Build a MultiEdge for *site_link*.

    Collects the graph vertices of every site on the link and copies the
    link's replication parameters (cost, options, interval, schedule)
    onto the edge.
    """
    e = MultiEdge()
    e.site_link = site_link
    e.vertices = []
    for site in site_link.site_list:
        if site in dn_to_vertex:
            e.vertices.extend(dn_to_vertex.get(site))
    e.repl_info.cost = site_link.cost
    e.repl_info.options = site_link.options
    e.repl_info.interval = site_link.interval
    e.repl_info.schedule = site_link.schedule
    e.con_type = con_type
    # NOTE(review): trailing statements were elided in this copy;
    # upstream marks the edge undirected before returning -- confirm.
    e.directed = False
    return e
def create_auto_edge_set(graph, transport):
    """Build the automatic MultiEdgeSet for *transport*: every graph edge
    of that connection type, under a NULL guid (no SiteLinkBridge object).
    """
    e_set = MultiEdgeSet()
    e_set.guid = misc.GUID()  # NULL guid, not associated with a SiteLinkBridge object
    for site_link in graph.edges:
        if site_link.con_type == transport:
            e_set.edges.append(site_link)

    return e_set
def create_edge_set(graph, transport, site_link_bridge):
    """Build the MultiEdgeSet for one site link bridge.

    TODO not implemented - need to store all site link bridges; an empty
    set is returned for now.
    """
    e_set = MultiEdgeSet()
    # e_set.guid = site_link_bridge
    return e_set
def setup_vertices(graph):
    """Reset per-vertex shortest-path state for every vertex in *graph*.

    White vertices become unreachable (infinite cost, no root or
    component); colored vertices become roots of their own single-vertex
    components with zero cost.
    """
    for v in graph.vertices:
        if v.is_white():
            v.repl_info.cost = 2 ** 32 - 1
            v.root = None
            v.component_id = None
        else:
            v.repl_info.cost = 0
            v.root = v
            v.component_id = v

        v.repl_info.interval = 0
        v.repl_info.options = 0xFFFFFFFF
        v.repl_info.schedule = None  # TODO highly suspicious
        v.demoted = False
def dijkstra(graph, edge_type, include_black):
    """Run Dijkstra's algorithm over *graph* for edges of *edge_type*,
    relaxing paths via try_new_path().

    :param include_black: whether black vertices may act as roots
    """
    queue = []
    setup_dijkstra(graph, edge_type, include_black, queue)
    while len(queue) > 0:
        cost, guid, vertex = heapq.heappop(queue)
        for edge in vertex.edges:
            for v in edge.vertices:
                # Skip the endpoint we came from.
                if v is not vertex:
                    # add new path from vertex to v
                    try_new_path(graph, queue, vertex, edge, v)
def setup_dijkstra(graph, edge_type, include_black, queue):
    """Initialize vertex state and seed *queue* with the colored vertices
    eligible to act as Dijkstra roots for *edge_type*.
    """
    setup_vertices(graph)
    for vertex in graph.vertices:
        if vertex.is_white():
            continue

        if ((vertex.is_black() and not include_black)
            or edge_type not in vertex.accept_black
            or edge_type not in vertex.accept_red_red):
            # Vertex cannot root a path of this transport; demote it.
            vertex.repl_info.cost = 2 ** 32 - 1
            vertex.root = None  # NULL GUID
            vertex.demoted = True  # Demoted appears not to be used
        else:
            # BUG FIX: the original pushed vertex.replInfo.cost, but the
            # attribute is spelled repl_info everywhere else in this file
            # (would raise AttributeError at runtime).
            # TODO guid must be string?
            heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
def try_new_path(graph, queue, vfrom, edge, vto):
    """Consider extending the shortest-path tree from *vfrom* across
    *edge* to *vto*; adopt the new path and re-queue *vto* when it is
    cheaper or offers a longer replication schedule.
    """
    newRI = ReplInfo()
    # What this function checks is that there is a valid time frame for
    # which replication can actually occur, despite being adequately
    # connected
    intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI)

    # If the new path costs more than the current, then ignore the edge
    if newRI.cost > vto.repl_info.cost:
        return

    # Cheaper but with no overlapping schedule window: unusable.
    if newRI.cost < vto.repl_info.cost and not intersect:
        return

    new_duration = total_schedule(newRI.schedule)
    old_duration = total_schedule(vto.repl_info.schedule)

    # Cheaper or longer schedule
    if newRI.cost < vto.repl_info.cost or new_duration > old_duration:
        vto.root = vfrom.root
        vto.component_id = vfrom.component_id
        vto.repl_info = newRI
        heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
def check_demote_vertex(vertex, edge_type):
    """Demote *vertex* (infinite cost, no root) if it accepts neither
    red-red nor black edges of *edge_type*. White vertices are untouched.
    """
    if vertex.is_white():
        return

    # Accepts neither red-red nor black edges, demote
    if edge_type not in vertex.accept_black and edge_type not in vertex.accept_red_red:
        vertex.repl_info.cost = 2 ** 32 - 1
        vertex.root = None
        vertex.demoted = True  # Demoted appears not to be used
def undemote_vertex(vertex):
    """Reverse a demotion: restore *vertex* as a zero-cost root of itself.
    White vertices are untouched.
    """
    if vertex.is_white():
        return

    vertex.repl_info.cost = 0
    vertex.root = vertex
    vertex.demoted = False
def process_edge_set(graph, e_set, internal_edges):
    """Process one multi-edge set.

    For the NULL set (e_set is None) every graph edge is examined, with
    endpoints that cannot carry the edge's transport temporarily demoted
    around the call; otherwise only the set's own edges are processed.

    NOTE(review): the None/else split was elided in this copy and was
    reconstructed from upstream samba_kcc -- confirm.
    """
    if e_set is None:
        for edge in graph.edges:
            for vertex in edge.vertices:
                check_demote_vertex(vertex, edge.con_type)
            process_edge(graph, edge, internal_edges)
            for vertex in edge.vertices:
                undemote_vertex(vertex)
    else:
        for edge in e_set.edges:
            process_edge(graph, edge, internal_edges)
def process_edge(graph, examine, internal_edges):
    """Pick the best vertex on edge *examine* (lowest color, then cost,
    then guid) and add an internal edge to it from every other colored
    vertex in a different component.
    """
    # Find the set of all vertices that touch the edge to examine
    vertices = []
    for v in examine.vertices:
        # Append a 4-tuple of color, repl cost, guid and vertex
        vertices.append((v.color, v.repl_info.cost, v.guid, v))
    # Sort by color, lower cost, then guid; the first entry wins.
    vertices.sort()

    color, cost, guid, bestv = vertices[0]
    # Add to internal edges an edge from every colored vertex to bestV
    for v in examine.vertices:
        if v.component_id is None or v.root is None:
            continue

        # Only add edge if valid inter-tree edge - needs a root and
        # different components
        if (bestv.component_id is not None and bestv.root is not None
            and v.component_id is not None and v.root is not None and
            bestv.component_id != v.component_id):
            add_int_edge(graph, internal_edges, examine, bestv, v)
# Add internal edge, endpoints are roots of the vertices to pass in and are always colored
def add_int_edge(graph, internal_edges, examine, v1, v2):
    """Create an InternalEdge between the roots of *v1* and *v2* across
    edge *examine* and add it to *internal_edges*, provided both roots
    accept the edge's transport and the combined replication windows
    intersect.
    """
    root1 = v1.root
    root2 = v2.root

    # A red-red edge joins two red roots; anything else is black.
    red_red = False
    if root1.is_red() and root2.is_red():
        red_red = True

    if red_red:
        if (examine.con_type not in root1.accept_red_red
            or examine.con_type not in root2.accept_red_red):
            return
    else:
        if (examine.con_type not in root1.accept_black
            or examine.con_type not in root2.accept_black):
            return

    ri = ReplInfo()
    ri2 = ReplInfo()

    # Create the transitive replInfo for the two trees and this edge
    if not combine_repl_info(v1.repl_info, v2.repl_info, ri):
        return
    # ri is now initialized
    if not combine_repl_info(ri, examine.repl_info, ri2):
        return

    newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type)
    # Order by vertex guid
    if newIntEdge.v1.guid > newIntEdge.v2.guid:  # TODO compare guid (str)
        newIntEdge.v1 = root2
        newIntEdge.v2 = root1

    internal_edges.add(newIntEdge)
def kruskal(graph, edges):
    """Compute a spanning forest over *graph* from the internal *edges*
    using Kruskal's algorithm, emitting accepted edges via add_out_edge().

    :return: tuple of (list of output MultiEdges, number of components
        remaining after the merge)

    NOTE(review): several loop-scaffolding lines were elided in this copy
    and were reconstructed from upstream samba_kcc -- confirm.
    """
    # Clear the vertex edge lists; add_out_edge() repopulates them.
    for v in graph.vertices:
        v.edges = []

    components = set(graph.vertices)
    edges = list(set(edges))

    # Sorted based on internal comparison function of internal edge
    edges.sort()

    expected_num_tree_edges = 0  # TODO this value makes little sense

    count_edges = 0
    output_edges = []
    index = 0
    while index < len(edges):  # TODO and num_components > 1
        e = edges[index]
        parent1 = find_component(e.v1)
        parent2 = find_component(e.v2)
        if parent1 is not parent2:
            count_edges += 1
            # Union the two trees and keep the edge.
            add_out_edge(graph, output_edges, e)
            parent1.component = parent2
            components.discard(parent1)

        index += 1

    return output_edges, len(components)
def find_component(vertex):
    """Return the root of the union-find component containing *vertex*,
    compressing the traversed path so later lookups are O(1).
    """
    if vertex.component is vertex:
        return vertex

    # Walk up to the root of the component.
    current = vertex
    while current.component is not current:
        current = current.component

    root = current

    # Path compression: point every vertex on the walk directly at root.
    current = vertex
    while current.component is not root:
        n = current.component
        current.component = root
        current = n

    return root
def add_out_edge(graph, output_edges, e):
    """Convert internal (Kruskal) edge *e* into a MultiEdge, append it to
    *output_edges*, and register it on both endpoint vertices.

    NOTE(review): the MultiEdge construction lines were elided in this
    copy and were reconstructed from upstream samba_kcc -- confirm.
    """
    v1 = e.v1
    v2 = e.v2

    # This multi-edge is a 'real' edge with no GUID
    ee = MultiEdge()
    ee.directed = False
    ee.vertices.append(v1)
    ee.vertices.append(v2)
    ee.con_type = e.e_type
    ee.repl_info = e.repl_info
    output_edges.append(ee)

    v1.edges.append(ee)
    v2.edges.append(ee)
##################################################
# samba_kcc entry point
##################################################
# NOTE(review): several conditionals in this script tail (log-level
# selection, seed choice, export/import dispatch, exit handling) were
# elided in this copy and were reconstructed from upstream samba_kcc --
# confirm before relying on them.

parser = optparse.OptionParser("samba_kcc [options]")
sambaopts = options.SambaOptions(parser)
credopts = options.CredentialsOptions(parser)

parser.add_option_group(sambaopts)
parser.add_option_group(credopts)
parser.add_option_group(options.VersionOptions(parser))

parser.add_option("--readonly",
                  help="compute topology but do not update database",
                  action="store_true")

parser.add_option("--debug",
                  help="debug output",
                  action="store_true")

parser.add_option("--seed",
                  help="random number seed",
                  type=str, metavar="<number>")

parser.add_option("--importldif",
                  help="import topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("--exportldif",
                  help="export topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("-H", "--URL",
                  help="LDB URL for database or target server",
                  type=str, metavar="<URL>", dest="dburl")

parser.add_option("--tmpdb",
                  help="schemaless database file to create for ldif import",
                  type=str, metavar="<file>")

logger = logging.getLogger("samba_kcc")
logger.addHandler(logging.StreamHandler(sys.stdout))

lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)

opts, args = parser.parse_args()

if opts.readonly is None:
    opts.readonly = False

# Verbosity scales down from debug to warnings-only.
if opts.debug:
    logger.setLevel(logging.DEBUG)
elif opts.readonly:
    logger.setLevel(logging.INFO)
else:
    logger.setLevel(logging.WARNING)

# initialize seed from optional input parameter; fixed default keeps
# runs reproducible
if opts.seed:
    random.seed(int(opts.seed))
else:
    random.seed(0xACE5CA11)

if opts.dburl is None:
    opts.dburl = lp.samdb_url()

# Instantiate Knowledge Consistency Checker and perform run
kcc = KCC()

if opts.exportldif:
    rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)
    sys.exit(rc)

if opts.importldif:
    if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
        logger.error("Specify a target temp database file with --tmpdb option.")
        sys.exit(1)

    rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)
    if rc != 0:
        sys.exit(rc)

rc = kcc.run(opts.dburl, lp, creds)
sys.exit(rc)