# -*- test-case-name: buildbot.test.test_run -*-

import string, os
signal = None
try:
    import signal
except ImportError:
    pass
try:
    import cPickle as pickle
except ImportError:
    import pickle

from twisted.python import log, components
from twisted.internet import defer, reactor
from twisted.spread import pb
from twisted.cred import portal, checkers
from twisted.application import service, strports
from twisted.persisted import styles

from buildbot.twcompat import implements
from buildbot.util import now
from buildbot.pbutil import NewCredPerspective
from buildbot.process.builder import Builder, IDLE
from buildbot.process.base import BuildRequest
from buildbot.status.builder import SlaveStatus, Status
from buildbot.changes.changes import Change, ChangeMaster
from buildbot.sourcestamp import SourceStamp
from buildbot import interfaces

########################################
class BotPerspective(NewCredPerspective):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['bots'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .service attribute."""

    def __init__(self, name, botmaster):
        self.slavename = name
        self.botmaster = botmaster
        self.slave_status = SlaveStatus(name)
        self.slave = None # a RemoteReference to the Bot, when connected
        self.slave_commands = None

    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.slave:
            return self.sendBuilderList()
        return defer.succeed(None)

    def __repr__(self):
        return "<BotPerspective '%s', builders: %s>" % \
               (self.slavename,
                string.join(map(lambda b: b.name, self.builders), ','))
    def attached(self, bot):
        """This is called when the slave connects.

        @return: a Deferred that fires with a suitable pb.IPerspective to
        give to the slave (i.e. 'self')"""

        if self.slave:
            # uh-oh, we've got a duplicate slave. The most likely
            # explanation is that the slave is behind a slow link, thinks we
            # went away, and has attempted to reconnect, so we've got two
            # "connections" from the same slave, but the previous one is
            # stale. Give the new one precedence.
            log.msg("duplicate slave %s replacing old one" % self.slavename)

            # just in case we've got two identically-configured slaves,
            # report the IP addresses of both so someone can resolve the
            # conflict
            tport = self.slave.broker.transport
            log.msg("old slave was connected from", tport.getPeer())
            log.msg("new slave is from", bot.broker.transport.getPeer())
            d = self.disconnect()
        else:
            d = defer.succeed(None)
        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # we accumulate slave information in this 'state' dictionary, then
        # set it atomically if we make it far enough through the process
        state = {}

        def _log_attachment_on_slave(res):
            d1 = bot.callRemote("print", "attached")
            d1.addErrback(lambda why: None)
            return d1
        d.addCallback(_log_attachment_on_slave)

        def _get_info(res):
            d1 = bot.callRemote("getSlaveInfo")
            def _got_info(info):
                log.msg("Got slaveinfo from '%s'" % self.slavename)
                # TODO: info{} might have other keys
                state["admin"] = info.get("admin")
                state["host"] = info.get("host")
            def _info_unavailable(why):
                # maybe an old slave, doesn't implement remote_getSlaveInfo
                log.msg("BotPerspective.info_unavailable")
                log.err(why)
            d1.addCallbacks(_got_info, _info_unavailable)
            return d1
        d.addCallback(_get_info)

        def _get_commands(res):
            d1 = bot.callRemote("getCommands")
            def _got_commands(commands):
                state["slave_commands"] = commands
            def _commands_unavailable(why):
                # probably an old slave
                log.msg("BotPerspective._commands_unavailable")
                if why.check(AttributeError):
                    return
                log.err(why)
            d1.addCallbacks(_got_commands, _commands_unavailable)
            return d1
        d.addCallback(_get_commands)

        def _accept_slave(res):
            self.slave_status.setAdmin(state.get("admin"))
            self.slave_status.setHost(state.get("host"))
            self.slave_status.setConnected(True)
            self.slave_commands = state.get("slave_commands")
            self.slave = bot
            log.msg("bot attached")
            return self.updateSlave()
        d.addCallback(_accept_slave)

        # Finally, the slave gets a reference to this BotPerspective. They
        # receive this later, after we've started using them.
        d.addCallback(lambda res: self)
        return d
    def detached(self, mind):
        self.slave = None
        self.slave_status.setConnected(False)
        self.botmaster.slaveLost(self)
        log.msg("BotPerspective.detached(%s)" % self.slavename)
    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """

        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)

        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback with one argument, the
        # RemoteReference being disconnected.
        def _disconnected(rref):
            reactor.callLater(0, d.callback, None)
        self.slave.notifyOnDisconnect(_disconnected)
        tport = self.slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            #tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
        except:
            # however, these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
        log.msg("waiting for slave to finish disconnecting")

        # When this Deferred fires, we'll be ready to accept the new slave
        return d
    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
        blist = [(b.name, b.builddir) for b in our_builders]
        d = self.slave.callRemote("setBuilderList", blist)
        def _sent(slist):
            dl = []
            for name, remote in slist.items():
                # use get() since we might have changed our mind since then
                b = self.botmaster.builders.get(name)
                if b:
                    d1 = b.attached(self, remote, self.slave_commands)
                    dl.append(d1)
            return defer.DeferredList(dl)
        def _set_failed(why):
            log.msg("BotPerspective.sendBuilderList (%s) failed" % self)
            log.err(why)
            # TODO: hang up on them?, without setBuilderList we can't use
            # them
        d.addCallbacks(_sent, _set_failed)
        return d

    def perspective_keepalive(self):
        pass
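        # perspective_keepalive is deliberately a no-op: the slave is
        # expected to call it periodically just to keep traffic moving on
        # the PB connection; merely receiving the remote call is the point.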
class BotMaster(service.Service):

    """This is the master-side service which manages remote buildbot slaves.
    It provides them with BotPerspectives, and distributes file change
    notification messages to them.
    """

    def __init__(self):
        self.builders = {}
        self.builderNames = []
        # builders maps Builder names to instances of bb.p.builder.Builder,
        # which is the master-side object that defines and controls a build.
        # They are added by calling botmaster.addBuilder() from the startup
        # code.

        # self.slaves contains a ready BotPerspective instance for each
        # potential buildslave, i.e. all the ones listed in the config file.
        # If the slave is connected, self.slaves[slavename].slave will
        # contain a RemoteReference to their Bot instance. If it is not
        # connected, that attribute will hold None.
        self.slaves = {} # maps slavename to BotPerspective
        self.statusClientService = None

        # self.locks holds the real Lock instances
        self.locks = {}

    # these four are convenience functions for testing

    def waitUntilBuilderAttached(self, name):
        b = self.builders[name]
        # return defer.succeed(None)
        d = defer.Deferred()
        b.watchers['attach'].append(d)
        return d

    def waitUntilBuilderDetached(self, name):
        b = self.builders.get(name)
        if not b or not b.slaves:
            return defer.succeed(None)
        d = defer.Deferred()
        b.watchers['detach'].append(d)
        return d

    def waitUntilBuilderFullyDetached(self, name):
        b = self.builders.get(name)
        # TODO: this looks too deeply inside the Builder object
        if not b or not b.slaves:
            return defer.succeed(None)
        d = defer.Deferred()
        b.watchers['detach_all'].append(d)
        return d

    def waitUntilBuilderIdle(self, name):
        b = self.builders[name]
        # TODO: this looks way too deeply inside the Builder object
        for sb in b.slaves:
            if sb.state != IDLE:
                d = defer.Deferred()
                b.watchers['idle'].append(d)
                return d
        return defer.succeed(None)
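    # A sketch of how a test might use these helpers (the builder name
    # "full" is hypothetical): each one returns a Deferred that fires once
    # the named Builder reaches the corresponding state, e.g.
    #   d = master.botmaster.waitUntilBuilderAttached("full")
    #   d.addCallback(lambda res: check_builder_state())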
    def addSlave(self, slavename):
        slave = BotPerspective(slavename, self)
        self.slaves[slavename] = slave

    def removeSlave(self, slavename):
        d = self.slaves[slavename].disconnect()
        del self.slaves[slavename]
        return d
    def slaveLost(self, bot):
        for name, b in self.builders.items():
            if bot.slavename in b.slavenames:
                b.detached(bot)

    def getBuildersForSlave(self, slavename):
        return [b
                for b in self.builders.values()
                if slavename in b.slavenames]

    def getBuildernames(self):
        return self.builderNames
    def getBuilders(self):
        allBuilders = [self.builders[name] for name in self.builderNames]
        return allBuilders

    def setBuilders(self, builders):
        self.builders = {}
        self.builderNames = []
        for b in builders:
            for slavename in b.slavenames:
                # this is actually validated earlier
                assert slavename in self.slaves
            self.builders[b.name] = b
            self.builderNames.append(b.name)
        d = self._updateAllSlaves()
        return d

    def _updateAllSlaves(self):
        """Notify all buildslaves about changes in their Builders."""
        dl = [s.updateSlave() for s in self.slaves.values()]
        return defer.DeferredList(dl)
    def maybeStartAllBuilds(self):
        for b in self.builders.values():
            b.maybeStartBuild()

    def getPerspective(self, slavename):
        return self.slaves[slavename]

    def shutdownSlaves(self):
        # TODO: make this into a bot method rather than a builder method
        for b in self.slaves.values():
            b.shutdownSlave()

    def stopService(self):
        for b in self.builders.values():
            b.builder_status.addPointEvent(["master", "shutdown"])
            b.builder_status.saveYourself()
        return service.Service.stopService(self)

    def getLockByID(self, lockid):
        """Convert a Lock identifier into an actual Lock instance.
        @param lockid: a locks.MasterLock or locks.SlaveLock instance
        @return: a locks.RealMasterLock or locks.RealSlaveLock instance
        """
        if not lockid in self.locks:
            self.locks[lockid] = lockid.lockClass(lockid)
        # if the master.cfg file has changed maxCount= on the lock, the next
        # time a build is started, they'll get a new RealLock instance. Note
        # that this requires that MasterLock and SlaveLock (marker) instances
        # be hashable and that they should compare properly.
        return self.locks[lockid]
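    # A sketch of the caching behavior (the "compiler" lock name is
    # hypothetical): two Builds that name the same MasterLock identifier get
    # the identical RealMasterLock back, which is what actually makes the
    # exclusion work across them:
    #   lockid = locks.MasterLock("compiler")
    #   assert botmaster.getLockByID(lockid) is botmaster.getLockByID(lockid)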
########################################

class DebugPerspective(NewCredPerspective):
    def attached(self, mind):
        return self
    def detached(self, mind):
        pass

    def perspective_requestBuild(self, buildername, reason, branch, revision):
        c = interfaces.IControl(self.master)
        bc = c.getBuilder(buildername)
        ss = SourceStamp(branch, revision)
        br = BuildRequest(reason, ss, buildername)
        bc.requestBuild(br)

    def perspective_pingBuilder(self, buildername):
        c = interfaces.IControl(self.master)
        bc = c.getBuilder(buildername)
        bc.ping()

    def perspective_fakeChange(self, file, revision=None, who="fakeUser",
                               branch=None):
        change = Change(who, [file], "some fake comments\n",
                        branch=branch, revision=revision)
        c = interfaces.IControl(self.master)
        c.addChange(change)

    def perspective_setCurrentState(self, buildername, state):
        builder = self.botmaster.builders.get(buildername)
        if not builder: return
        if state == "offline":
            builder.statusbag.currentlyOffline()
        if state == "idle":
            builder.statusbag.currentlyIdle()
        if state == "waiting":
            builder.statusbag.currentlyWaiting(now()+10)
        if state == "building":
            builder.statusbag.currentlyBuilding(None)
    def perspective_reload(self):
        print "doing reload of the config file"
        self.master.loadTheConfigFile()
    def perspective_pokeIRC(self):
        print "saying something on IRC"
        from buildbot.status import words
        for s in self.master:
            if isinstance(s, words.IRC):
                bot = s.f
                for channel in bot.channels:
                    print " channel", channel
                    bot.p.msg(channel, "Ow, quit it")

    def perspective_print(self, msg):
        print "debug", msg
class Dispatcher(styles.Versioned):
    if implements:
        implements(portal.IRealm)
    else:
        __implements__ = portal.IRealm,
    persistenceVersion = 2

    def __init__(self):
        self.names = {}

    def upgradeToVersion1(self):
        self.master = self.botmaster.parent
    def upgradeToVersion2(self):
        self.names = {}

    def register(self, name, afactory):
        self.names[name] = afactory
    def unregister(self, name):
        del self.names[name]

    def requestAvatar(self, avatarID, mind, interface):
        assert interface == pb.IPerspective
        afactory = self.names.get(avatarID)
        if afactory:
            p = afactory.getPerspective()
        elif avatarID == "debug":
            p = DebugPerspective()
            p.master = self.master
            p.botmaster = self.botmaster
        elif avatarID == "statusClient":
            p = self.statusClientService.getPerspective()
        else:
            # it must be one of the buildslaves: no other names will make it
            # past the checker
            p = self.botmaster.getPerspective(avatarID)

        if not p:
            raise ValueError("no perspective for '%s'" % avatarID)

        d = defer.maybeDeferred(p.attached, mind)
        d.addCallback(self._avatarAttached, mind)
        return d

    def _avatarAttached(self, p, mind):
        return (pb.IPerspective, p, lambda p=p,mind=mind: p.detached(mind))
########################################

# service hierarchy:
#  BuildMaster
#   BotMaster
#   ChangeMaster
#    all IChangeSource objects
#   StatusClientService
#   TCPClient(self.ircFactory)
#   TCPServer(self.slaveFactory) -> dispatcher.requestAvatar
#   TCPServer(self.site)
#   UNIXServer(ResourcePublisher(self.site))

class BuildMaster(service.MultiService, styles.Versioned):
    persistenceVersion = 3
    manhole = None
    debugPassword = None
    projectName = "(unspecified)"
    projectURL = None
    buildbotURL = None
    change_svc = None

    def __init__(self, basedir, configFileName="master.cfg"):
        service.MultiService.__init__(self)
        self.setName("buildmaster")
        self.basedir = basedir
        self.configFileName = configFileName

        # the dispatcher is the realm in which all inbound connections are
        # looked up: slave builders, change notifications, status clients, and
        # the debug port
        dispatcher = Dispatcher()
        dispatcher.master = self
        self.dispatcher = dispatcher
        self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        # the checker starts with no user/passwd pairs: they are added later
        p = portal.Portal(dispatcher)
        p.registerChecker(self.checker)
        self.slaveFactory = pb.PBServerFactory(p)
        self.slaveFactory.unsafeTracebacks = True # let them see exceptions

        self.slavePortnum = None
        self.slavePort = None

        self.botmaster = BotMaster()
        self.botmaster.setName("botmaster")
        self.botmaster.setServiceParent(self)
        dispatcher.botmaster = self.botmaster

        self.status = Status(self.botmaster, self.basedir)

        self.statusTargets = []

        self.bots = []
        # this ChangeMaster is a dummy, only used by tests. In the real
        # buildmaster, where the BuildMaster instance is activated
        # (startService is called) by twistd, this attribute is overwritten.
        self.useChanges(ChangeMaster())

        self.readConfig = False
    def upgradeToVersion1(self):
        self.dispatcher = self.slaveFactory.root.portal.realm

    def upgradeToVersion2(self): # post-0.4.3
        self.webServer = self.webTCPPort
        del self.webTCPPort
        self.webDistribServer = self.webUNIXPort
        del self.webUNIXPort
        self.configFileName = "master.cfg"

    def upgradeToVersion3(self):
        # post 0.6.3, solely to deal with the 0.6.3 breakage. Starting with
        # 0.6.5 I intend to do away with .tap files altogether
        self.services = []
        self.namedServices = {}
        del self.change_svc

    def startService(self):
        service.MultiService.startService(self)
        self.loadChanges() # must be done before loading the config file
        if not self.readConfig:
            # TODO: consider catching exceptions during this call to
            # loadTheConfigFile and bailing (reactor.stop) if it fails,
            # since without a config file we can't do anything except reload
            # the config file, and it would be nice for the user to discover
            # this quickly.
            self.loadTheConfigFile()
        if signal and hasattr(signal, "SIGHUP"):
            signal.signal(signal.SIGHUP, self._handleSIGHUP)
        for b in self.botmaster.builders.values():
            b.builder_status.addPointEvent(["master", "started"])
            b.builder_status.saveYourself()
    def useChanges(self, changes):
        if self.change_svc:
            # TODO: can return a Deferred
            self.change_svc.disownServiceParent()
        self.change_svc = changes
        self.change_svc.basedir = self.basedir
        self.change_svc.setName("changemaster")
        self.dispatcher.changemaster = self.change_svc
        self.change_svc.setServiceParent(self)

    def loadChanges(self):
        filename = os.path.join(self.basedir, "changes.pck")
        try:
            changes = pickle.load(open(filename, "rb"))
            styles.doUpgrade()
        except IOError:
            log.msg("changes.pck missing, using new one")
            changes = ChangeMaster()
        except EOFError:
            log.msg("corrupted changes.pck, using new one")
            changes = ChangeMaster()
        self.useChanges(changes)

    def _handleSIGHUP(self, *args):
        reactor.callLater(0, self.loadTheConfigFile)
    def getStatus(self):
        """
        @rtype: L{buildbot.status.builder.Status}
        """
        return self.status
    def loadTheConfigFile(self, configFile=None):
        if not configFile:
            configFile = os.path.join(self.basedir, self.configFileName)

        log.msg("loading configuration from %s" % configFile)
        configFile = os.path.expanduser(configFile)

        try:
            f = open(configFile, "r")
        except IOError:
            log.msg("unable to open config file '%s'" % configFile)
            log.msg("leaving old configuration in place")
            return

        try:
            self.loadConfig(f)
        except:
            log.msg("error during loadConfig")
            log.err()
            log.msg("The new config file is unusable, so I'll ignore it.")
            log.msg("I will keep using the previous config file instead.")
        f.close()
    def loadConfig(self, f):
        """Internal function to load a specific configuration file. Any
        errors in the file will be signalled by raising an exception.

        @return: a Deferred that will fire (with None) when the configuration
        changes have been completed. This may involve a round-trip to each
        buildslave that was involved."""
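        # A minimal sketch of what the config file is expected to define
        # (names like 'bot1' and 'full' are hypothetical; see known_keys
        # below for the full set of recognized keys):
        #   BuildmasterConfig = {
        #       'bots': [('bot1', 'passwd1')],
        #       'sources': [],
        #       'schedulers': [],
        #       'builders': [{'name': 'full', 'slavename': 'bot1',
        #                     'builddir': 'full-build', 'factory': f}],
        #       'slavePortnum': "tcp:9989",
        #   }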
        localDict = {'basedir': os.path.expanduser(self.basedir)}
        try:
            exec f in localDict
        except:
            log.msg("error while parsing config file")
            raise

        try:
            config = localDict['BuildmasterConfig']
        except KeyError:
            log.err("missing config dictionary")
            log.err("config file must define BuildmasterConfig")
            raise

        known_keys = "bots sources schedulers builders slavePortnum " + \
                     "debugPassword manhole " + \
                     "status projectName projectURL buildbotURL"
        known_keys = known_keys.split()
        for k in config.keys():
            if k not in known_keys:
                log.msg("unknown key '%s' defined in config dictionary" % k)

        try:
            # required
            bots = config['bots']
            sources = config['sources']
            schedulers = config['schedulers']
            builders = config['builders']
            slavePortnum = config['slavePortnum']

            # optional
            debugPassword = config.get('debugPassword')
            manhole = config.get('manhole')
            status = config.get('status', [])
            projectName = config.get('projectName')
            projectURL = config.get('projectURL')
            buildbotURL = config.get('buildbotURL')
        except KeyError, e:
            log.msg("config dictionary is missing a required parameter")
            log.msg("leaving old configuration in place")
            raise

        # do some validation first
        for name, passwd in bots:
            if name in ("debug", "change", "status"):
                raise KeyError, "reserved name '%s' used for a bot" % name
        if config.has_key('interlocks'):
            raise KeyError("c['interlocks'] is no longer accepted")

        assert isinstance(sources, (list, tuple))
        for s in sources:
            assert interfaces.IChangeSource(s, None)
        # this assertion catches c['schedulers'] = Scheduler(), since
        # Schedulers are service.MultiServices and thus iterable.
        errmsg = "c['schedulers'] must be a list of Scheduler instances"
        assert isinstance(schedulers, (list, tuple)), errmsg
        for s in schedulers:
            assert interfaces.IScheduler(s, None), errmsg
        assert isinstance(status, (list, tuple))
        for s in status:
            assert interfaces.IStatusReceiver(s, None)

        slavenames = [name for name,pw in bots]
708 raise ValueError("builder %s must be defined with a dict, "
709 "not a tuple" % b
[0])
710 if b
.has_key('slavename') and b
['slavename'] not in slavenames
:
711 raise ValueError("builder %s uses undefined slave %s" \
712 % (b
['name'], b
['slavename']))
713 for n
in b
.get('slavenames', []):
714 if n
not in slavenames
:
715 raise ValueError("builder %s uses undefined slave %s" \
717 if b
['name'] in buildernames
:
718 raise ValueError("duplicate builder name %s"
720 buildernames
.append(b
['name'])
721 if b
['builddir'] in dirnames
:
722 raise ValueError("builder %s reuses builddir %s"
723 % (b
['name'], b
['builddir']))
724 dirnames
.append(b
['builddir'])
        schedulernames = []
        for s in schedulers:
            for b in s.listBuilderNames():
                assert b in buildernames, \
                       "%s uses unknown builder %s" % (s, b)
            if s.name in schedulernames:
                # TODO: schedulers share a namespace with other Service
                # children of the BuildMaster node, like status plugins, the
                # Manhole, the ChangeMaster, and the BotMaster (although most
                # of these don't have names)
                msg = ("Schedulers must have unique names, but "
                       "'%s' was a duplicate" % (s.name,))
                raise ValueError(msg)
            schedulernames.append(s.name)
        # assert that all locks used by the Builds and their Steps are
        # uniquely named.
        locks = {}
        for b in builders:
            for l in b.get('locks', []):
                if locks.has_key(l.name):
                    if locks[l.name] is not l:
                        raise ValueError("Two different locks (%s and %s) "
                                         "share the name %s"
                                         % (l, locks[l.name], l.name))
                else:
                    locks[l.name] = l
            # TODO: this will break with any BuildFactory that doesn't use a
            # .steps list, but I think the verification step is more
            # important.
            for s in b['factory'].steps:
                for l in s[1].get('locks', []):
                    if locks.has_key(l.name):
                        if locks[l.name] is not l:
                            raise ValueError("Two different locks (%s and %s)"
                                             " share the name %s"
                                             % (l, locks[l.name], l.name))
                    else:
                        locks[l.name] = l
        # slavePortnum supposed to be a strports specification
        if type(slavePortnum) is int:
            slavePortnum = "tcp:%d" % slavePortnum

        # now we're committed to implementing the new configuration, so do
        # it atomically
        # TODO: actually, this is spread across a couple of Deferreds, so it
        # really isn't atomic.

        d = defer.succeed(None)

        self.projectName = projectName
        self.projectURL = projectURL
        self.buildbotURL = buildbotURL

        # self.bots: Disconnect any that were attached and removed from the
        # list. Update self.checker with the new list of passwords,
        # including debug/change/status.
        d.addCallback(lambda res: self.loadConfig_Slaves(bots))

        # self.debugPassword
        if debugPassword:
            self.checker.addUser("debug", debugPassword)
            self.debugPassword = debugPassword

        # self.manhole
        if manhole != self.manhole:
            # changing
            if self.manhole:
                # disownServiceParent may return a Deferred
                d.addCallback(lambda res: self.manhole.disownServiceParent())
                def _remove(res):
                    self.manhole = None
                    return res
                d.addCallback(_remove)
            if manhole:
                def _add(res):
                    self.manhole = manhole
                    manhole.setServiceParent(self)
                d.addCallback(_add)

        # add/remove self.botmaster.builders to match builders. The
        # botmaster will handle startup/shutdown issues.
        d.addCallback(lambda res: self.loadConfig_Builders(builders))

        d.addCallback(lambda res: self.loadConfig_status(status))

        # Schedulers are added after Builders in case they start right away
        d.addCallback(lambda res: self.loadConfig_Schedulers(schedulers))
        # and Sources go after Schedulers for the same reason
        d.addCallback(lambda res: self.loadConfig_Sources(sources))

        # self.slavePort
        if self.slavePortnum != slavePortnum:
            if self.slavePort:
                def closeSlavePort(res):
                    d1 = self.slavePort.disownServiceParent()
                    self.slavePort = None
                    return d1
                d.addCallback(closeSlavePort)
            if slavePortnum is not None:
                def openSlavePort(res):
                    self.slavePort = strports.service(slavePortnum,
                                                      self.slaveFactory)
                    self.slavePort.setServiceParent(self)
                d.addCallback(openSlavePort)
                log.msg("BuildMaster listening on port %s" % slavePortnum)
            self.slavePortnum = slavePortnum

        log.msg("configuration update started")
        def _done(res):
            self.readConfig = True
            log.msg("configuration update complete")
        d.addCallback(_done)
        d.addCallback(lambda res: self.botmaster.maybeStartAllBuilds())
        return d
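    # Note that loadConfig is reached two ways: at startup, via
    # startService -> loadTheConfigFile, and at runtime, when SIGHUP fires
    # _handleSIGHUP and the file is re-read on the next reactor turn. This
    # is what lets a running master be reconfigured without a restart.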
    def loadConfig_Slaves(self, bots):
        # set up the Checker with the names and passwords of all valid bots
        self.checker.users = {} # violates abstraction, oh well
        for user, passwd in bots:
            self.checker.addUser(user, passwd)
        self.checker.addUser("change", "changepw")

        # identify new/old bots
        old = self.bots; oldnames = [name for name,pw in old]
        new = bots; newnames = [name for name,pw in new]
        # removeSlave will hang up on the old bot
        dl = [self.botmaster.removeSlave(name)
              for name in oldnames if name not in newnames]
        [self.botmaster.addSlave(name)
         for name in newnames if name not in oldnames]
        self.bots = bots

        return defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
    def loadConfig_Sources(self, sources):
        log.msg("loadConfig_Sources, change_svc is", self.change_svc,
                self.change_svc.parent)
        # shut down any that were removed, start any that were added
        deleted_sources = [s for s in self.change_svc if s not in sources]
        added_sources = [s for s in sources if s not in self.change_svc]
        dl = [self.change_svc.removeSource(s) for s in deleted_sources]
        def addNewOnes(res):
            [self.change_svc.addSource(s) for s in added_sources]
        d = defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
        d.addCallback(addNewOnes)
        return d
    def allSchedulers(self):
        # TODO: when twisted-1.3 compatibility is dropped, switch to the
        # providedBy form, because it's faster (no actual adapter lookup)
        return [child for child in self
                #if interfaces.IScheduler.providedBy(child)]
                if interfaces.IScheduler(child, None)]
    def loadConfig_Schedulers(self, newschedulers):
        oldschedulers = self.allSchedulers()
        removed = [s for s in oldschedulers if s not in newschedulers]
        added = [s for s in newschedulers if s not in oldschedulers]
        dl = [defer.maybeDeferred(s.disownServiceParent) for s in removed]
        def addNewOnes(res):
            for s in added:
                s.setServiceParent(self)
        d = defer.DeferredList(dl, fireOnOneErrback=1)
        d.addCallback(addNewOnes)
        return d
    def loadConfig_Builders(self, newBuilderData):
        somethingChanged = False
        newList = {}
        newBuilderNames = []
        allBuilders = self.botmaster.builders.copy()
        for data in newBuilderData:
            name = data['name']
            newList[name] = data
            newBuilderNames.append(name)

        # identify all that were removed
        for oldname in self.botmaster.getBuildernames():
            if oldname not in newList:
                log.msg("removing old builder %s" % oldname)
                del allBuilders[oldname]
                somethingChanged = True
                # announce the change
                self.status.builderRemoved(oldname)

        # everything in newList is either unchanged, changed, or new
        for name, data in newList.items():
            old = self.botmaster.builders.get(name)
            basedir = data['builddir'] # used on both master and slave
            #name, slave, builddir, factory = data
            if not old: # new
                # category added after 0.6.2
                category = data.get('category', None)
                log.msg("adding new builder %s for category %s" %
                        (name, category))
                statusbag = self.status.builderAdded(name, basedir, category)
                builder = Builder(data, statusbag)
                allBuilders[name] = builder
                somethingChanged = True
            elif old.compareToSetup(data):
                # changed: try to minimize the disruption and only modify the
                # pieces that really changed
                diffs = old.compareToSetup(data)
                log.msg("updating builder %s: %s" % (name, "\n".join(diffs)))

                statusbag = old.builder_status
                statusbag.saveYourself() # seems like a good idea
                # TODO: if the basedir was changed, we probably need to make
                # a new statusbag
                new_builder = Builder(data, statusbag)
                new_builder.consumeTheSoulOfYourPredecessor(old)
                # that migrates any retained slavebuilders too

                # point out that the builder was updated. On the Waterfall,
                # this will appear just after any currently-running builds.
                statusbag.addPointEvent(["config", "updated"])

                allBuilders[name] = new_builder
                somethingChanged = True
            else:
                # unchanged: leave it alone
                log.msg("builder %s is unchanged" % name)

        if somethingChanged:
            sortedAllBuilders = [allBuilders[name] for name in newBuilderNames]
            d = self.botmaster.setBuilders(sortedAllBuilders)
            return d
    def loadConfig_status(self, status):
        dl = []

        # remove old ones
        for s in self.statusTargets[:]:
            if not s in status:
                log.msg("removing IStatusReceiver", s)
                d = defer.maybeDeferred(s.disownServiceParent)
                dl.append(d)
                self.statusTargets.remove(s)
        # after those are finished going away, add new ones
        def addNewOnes(res):
            for s in status:
                if not s in self.statusTargets:
                    log.msg("adding IStatusReceiver", s)
                    s.setServiceParent(self)
                    self.statusTargets.append(s)
        d = defer.DeferredList(dl, fireOnOneErrback=1)
        d.addCallback(addNewOnes)
        return d
    def addChange(self, change):
        for s in self.allSchedulers():
            s.addChange(change)
    def submitBuildSet(self, bs):
        # determine the set of Builders to use
        builders = []
        for name in bs.builderNames:
            b = self.botmaster.builders.get(name)
            if b:
                if b not in builders:
                    builders.append(b)
                continue
            # TODO: add aliases like 'all'
            raise KeyError("no such builder named '%s'" % name)

        # now tell the BuildSet to create BuildRequests for all those
        # Builders and submit them
        bs.start(builders)
        self.status.buildsetSubmitted(bs.status)
class Control:
    if implements:
        implements(interfaces.IControl)
    else:
        __implements__ = interfaces.IControl,

    def __init__(self, master):
        self.master = master

    def addChange(self, change):
        self.master.change_svc.addChange(change)

    def submitBuildSet(self, bs):
        self.master.submitBuildSet(bs)

    def getBuilder(self, name):
        b = self.master.botmaster.builders[name]
        return interfaces.IBuilderControl(b)

components.registerAdapter(Control, BuildMaster, interfaces.IControl)
# so anybody who can get a handle on the BuildMaster can cause a build with:
#  IControl(master).getBuilder("full-2.3").requestBuild(buildrequest)
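# A slightly expanded sketch of that one-liner (the "full-2.3" builder name
# and the reason string are hypothetical):
#   from buildbot.interfaces import IControl
#   ss = SourceStamp(None, None)      # i.e. build the latest sources
#   req = BuildRequest("forced by hand", ss, "full-2.3")
#   IControl(master).getBuilder("full-2.3").requestBuild(req)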