1 # -*- test-case-name: buildbot.test.test_run -*-
15 from twisted
.python
import log
, components
16 from twisted
.internet
import defer
, reactor
17 from twisted
.spread
import pb
18 from twisted
.cred
import portal
, checkers
19 from twisted
.application
import service
, strports
20 from twisted
.persisted
import styles
23 from buildbot
.twcompat
import implements
24 from buildbot
.util
import now
25 from buildbot
.pbutil
import NewCredPerspective
26 from buildbot
.process
.builder
import Builder
, IDLE
27 from buildbot
.process
.base
import BuildRequest
28 from buildbot
.status
.builder
import SlaveStatus
, Status
29 from buildbot
.changes
.changes
import Change
, ChangeMaster
30 from buildbot
.sourcestamp
import SourceStamp
31 from buildbot
import interfaces
33 ########################################
38 class BotPerspective(NewCredPerspective
):
39 """This is the master-side representative for a remote buildbot slave.
40 There is exactly one for each slave described in the config file (the
41 c['bots'] list). When buildbots connect in (.attach), they get a
42 reference to this instance. The BotMaster object is stashed as the
43 .service attribute."""
def __init__(self, name, botmaster):
    """Track the buildslave named C{name} on behalf of C{botmaster}."""
    # attached(), detached() and disconnect() all read self.slavename,
    # but it was never assigned as presented: record it here.
    self.slavename = name
    self.botmaster = botmaster
    self.slave_status = SlaveStatus(name)
    self.slave = None # a RemoteReference to the Bot, when connected
    self.slave_commands = None
def updateSlave(self):
    """Called to add or remove builders after the slave has connected.

    @return: a Deferred that indicates when an attached slave has
    accepted the new builders and/or released the old ones."""
    # only push the builder list when a slave is actually attached;
    # as presented the second 'return' below was unreachable dead code
    if self.slave:
        return self.sendBuilderList()
    return defer.succeed(None)
62 return "<BotPerspective '%s', builders: %s>" % \
64 string
.join(map(lambda b
: b
.name
, self
.builders
), ','))
66 def attached(self
, bot
):
67 """This is called when the slave connects.
69 @return: a Deferred that fires with a suitable pb.IPerspective to
70 give to the slave (i.e. 'self')"""
73 # uh-oh, we've got a duplicate slave. The most likely
74 # explanation is that the slave is behind a slow link, thinks we
75 # went away, and has attempted to reconnect, so we've got two
76 # "connections" from the same slave, but the previous one is
77 # stale. Give the new one precedence.
78 log
.msg("duplicate slave %s replacing old one" % self
.slavename
)
80 # just in case we've got two identically-configured slaves,
81 # report the IP addresses of both so someone can resolve the
83 tport
= self
.slave
.broker
.transport
84 log
.msg("old slave was connected from", tport
.getPeer())
85 log
.msg("new slave is from", bot
.broker
.transport
.getPeer())
88 d
= defer
.succeed(None)
89 # now we go through a sequence of calls, gathering information, then
90 # tell the Botmaster that it can finally give this slave to all the
91 # Builders that care about it.
93 # we accumulate slave information in this 'state' dictionary, then
94 # set it atomically if we make it far enough through the process
97 def _log_attachment_on_slave(res
):
98 d1
= bot
.callRemote("print", "attached")
99 d1
.addErrback(lambda why
: None)
101 d
.addCallback(_log_attachment_on_slave
)
104 d1
= bot
.callRemote("getSlaveInfo")
106 log
.msg("Got slaveinfo from '%s'" % self
.slavename
)
107 # TODO: info{} might have other keys
108 state
["admin"] = info
.get("admin")
109 state
["host"] = info
.get("host")
110 def _info_unavailable(why
):
111 # maybe an old slave, doesn't implement remote_getSlaveInfo
112 log
.msg("BotPerspective.info_unavailable")
114 d1
.addCallbacks(_got_info
, _info_unavailable
)
116 d
.addCallback(_get_info
)
118 def _get_commands(res
):
119 d1
= bot
.callRemote("getCommands")
120 def _got_commands(commands
):
121 state
["slave_commands"] = commands
122 def _commands_unavailable(why
):
123 # probably an old slave
124 log
.msg("BotPerspective._commands_unavailable")
125 if why
.check(AttributeError):
128 d1
.addCallbacks(_got_commands
, _commands_unavailable
)
130 d
.addCallback(_get_commands
)
132 def _accept_slave(res
):
133 self
.slave_status
.setAdmin(state
.get("admin"))
134 self
.slave_status
.setHost(state
.get("host"))
135 self
.slave_status
.setConnected(True)
136 self
.slave_commands
= state
.get("slave_commands")
138 log
.msg("bot attached")
139 return self
.updateSlave()
140 d
.addCallback(_accept_slave
)
142 # Finally, the slave gets a reference to this BotPerspective. They
143 # receive this later, after we've started using them.
144 d
.addCallback(lambda res
: self
)
def detached(self, mind):
    """The slave's connection went away: update status and tell the
    botmaster so interested Builders hear about the loss."""
    # NOTE(review): .slave is documented as holding the RemoteReference
    # only "when connected"; clearing it here upholds that invariant --
    # confirm no caller relies on the stale reference.
    self.slave = None
    self.slave_status.setConnected(False)
    self.botmaster.slaveLost(self)
    log.msg("BotPerspective.detached(%s)" % self.slavename)
154 def disconnect(self
):
155 """Forcibly disconnect the slave.
157 This severs the TCP connection and returns a Deferred that will fire
158 (with None) when the connection is probably gone.
160 If the slave is still alive, they will probably try to reconnect
163 This is called in two circumstances. The first is when a slave is
164 removed from the config file. In this case, when they try to
165 reconnect, they will be rejected as an unknown slave. The second is
166 when we wind up with two connections for the same slave, in which
167 case we disconnect the older connection.
171 return defer
.succeed(None)
172 log
.msg("disconnecting old slave %s now" % self
.slavename
)
174 # all kinds of teardown will happen as a result of
175 # loseConnection(), but it happens after a reactor iteration or
176 # two. Hook the actual disconnect so we can know when it is safe
177 # to connect the new slave. We have to wait one additional
178 # iteration (with callLater(0)) to make sure the *other*
179 # notifyOnDisconnect handlers have had a chance to run.
182 # notifyOnDisconnect runs the callback with one argument, the
183 # RemoteReference being disconnected.
184 def _disconnected(rref
):
185 reactor
.callLater(0, d
.callback
, None)
186 self
.slave
.notifyOnDisconnect(_disconnected
)
187 tport
= self
.slave
.broker
.transport
188 # this is the polite way to request that a socket be closed
189 tport
.loseConnection()
191 # but really we don't want to wait for the transmit queue to
192 # drain. The remote end is unlikely to ACK the data, so we'd
193 # probably have to wait for a (20-minute) TCP timeout.
194 #tport._closeSocket()
195 # however, doing _closeSocket (whether before or after
196 # loseConnection) somehow prevents the notifyOnDisconnect
197 # handlers from being run. Bummer.
199 tport
.dataBuffer
= ""
202 # however, these hacks are pretty internal, so don't blow up if
203 # they fail or are unavailable
204 log
.msg("failed to accelerate the shutdown process")
206 log
.msg("waiting for slave to finish disconnecting")
208 # When this Deferred fires, we'll be ready to accept the new slave
211 def sendBuilderList(self
):
212 our_builders
= self
.botmaster
.getBuildersForSlave(self
.slavename
)
213 blist
= [(b
.name
, b
.builddir
) for b
in our_builders
]
214 d
= self
.slave
.callRemote("setBuilderList", blist
)
217 for name
, remote
in slist
.items():
218 # use get() since we might have changed our mind since then
219 b
= self
.botmaster
.builders
.get(name
)
221 d1
= b
.attached(self
, remote
, self
.slave_commands
)
223 return defer
.DeferredList(dl
)
224 def _set_failed(why
):
225 log
.msg("BotPerspective.sendBuilderList (%s) failed" % self
)
227 # TODO: hang up on them?, without setBuilderList we can't use
229 d
.addCallbacks(_sent
, _set_failed
)
def perspective_keepalive(self):
    """Remote no-op; the slave calls this periodically, presumably to
    keep the PB connection alive. Nothing to do on the master side."""
    pass
236 class BotMaster(service
.Service
):
238 """This is the master-side service which manages remote buildbot slaves.
239 It provides them with BotPerspectives, and distributes file change
240 notification messages to them.
247 self
.builderNames
= []
248 # builders maps Builder names to instances of bb.p.builder.Builder,
249 # which is the master-side object that defines and controls a build.
250 # They are added by calling botmaster.addBuilder() from the startup
253 # self.slaves contains a ready BotPerspective instance for each
254 # potential buildslave, i.e. all the ones listed in the config file.
255 # If the slave is connected, self.slaves[slavename].slave will
256 # contain a RemoteReference to their Bot instance. If it is not
257 # connected, that attribute will hold None.
258 self
.slaves
= {} # maps slavename to BotPerspective
259 self
.statusClientService
= None
262 # self.locks holds the real Lock instances
265 # these four are convenience functions for testing
def waitUntilBuilderAttached(self, name):
    """Testing convenience: return a Deferred that fires when a slave
    attaches to the named Builder."""
    b = self.builders[name]
    # return defer.succeed(None)
    # 'd' was used without being created and nothing was returned as
    # presented; register a fresh Deferred with the builder's watchers
    d = defer.Deferred()
    b.watchers['attach'].append(d)
    return d
def waitUntilBuilderDetached(self, name):
    """Testing convenience: return a Deferred that fires when a slave
    detaches from the named Builder (fires at once if it has none)."""
    b = self.builders.get(name)
    if not b or not b.slaves:
        return defer.succeed(None)
    # 'd' was used without being created and nothing was returned as
    # presented; register a fresh Deferred with the builder's watchers
    d = defer.Deferred()
    b.watchers['detach'].append(d)
    return d
def waitUntilBuilderFullyDetached(self, name):
    """Testing convenience: return a Deferred that fires when the named
    Builder has lost all of its slaves (fires at once if it has none)."""
    b = self.builders.get(name)
    # TODO: this looks too deeply inside the Builder object
    if not b or not b.slaves:
        return defer.succeed(None)
    # 'd' was used without being created and nothing was returned as
    # presented; register a fresh Deferred with the builder's watchers
    d = defer.Deferred()
    b.watchers['detach_all'].append(d)
    return d
def waitUntilBuilderIdle(self, name):
    """Testing convenience: return a Deferred that fires when the named
    Builder goes idle (fires at once if it already is)."""
    b = self.builders[name]
    # TODO: this looks way too deeply inside the Builder object
    # NOTE(review): the busy/idle condition was missing as presented
    # (the watcher append used an undefined 'd' while the final line
    # unconditionally fired); waiting on the 'idle' watcher whenever a
    # slavebuilder is not IDLE matches the sibling wait* helpers --
    # confirm against the Builder implementation.
    for sb in b.slaves:
        if sb.state != IDLE:
            d = defer.Deferred()
            b.watchers['idle'].append(d)
            return d
    return defer.succeed(None)
def addSlave(self, slavename):
    """Create a BotPerspective for C{slavename} and start tracking it."""
    self.slaves[slavename] = BotPerspective(slavename, self)
def removeSlave(self, slavename):
    """Hang up on the named slave and forget it.

    @return: the Deferred from BotPerspective.disconnect, so callers
    (e.g. loadConfig_Slaves, which DeferredLists these) can wait for
    the connection to actually go away. As presented the Deferred was
    computed and silently dropped.
    """
    d = self.slaves[slavename].disconnect()
    del self.slaves[slavename]
    return d
def slaveLost(self, bot):
    """A slave connection was lost: notify every Builder that was
    configured to use that slave."""
    for name, b in self.builders.items():
        if bot.slavename in b.slavenames:
            # NOTE(review): the loop body was missing as presented;
            # notifying the builder mirrors the b.attached() call made
            # from BotPerspective.sendBuilderList -- confirm the
            # Builder API name.
            b.detached(bot)
def getBuildersForSlave(self, slavename):
    """Return the list of Builders configured to use C{slavename}."""
    # the 'return [b' opening of this comprehension was missing as
    # presented, leaving the body syntactically broken
    return [b
            for b in self.builders.values()
            if slavename in b.slavenames]
def getBuildernames(self):
    """Return the configured builder names, in configuration order."""
    return self.builderNames
def getBuilders(self):
    """Return the Builder instances, in configuration order.

    As presented, the list was built and then fell off the end of the
    function (implicitly returning None); return it.
    """
    allBuilders = [self.builders[name] for name in self.builderNames]
    return allBuilders
def setBuilders(self, builders):
    """Install a new list of Builders, replacing the previous set, and
    notify all connected slaves about the change.

    @return: the Deferred from _updateAllSlaves
    """
    # NOTE(review): the loop header, the reset of self.builders and the
    # return were missing as presented; replacing the whole mapping
    # matches the 'set' semantics implied by the name -- confirm.
    self.builders = {}
    self.builderNames = []
    for b in builders:
        for slavename in b.slavenames:
            # this is actually validated earlier
            assert slavename in self.slaves
        self.builders[b.name] = b
        self.builderNames.append(b.name)
    d = self._updateAllSlaves()
    return d
def _updateAllSlaves(self):
    """Notify all buildslaves about changes in their Builders."""
    pending = []
    for slave in self.slaves.values():
        pending.append(slave.updateSlave())
    return defer.DeferredList(pending)
347 def maybeStartAllBuilds(self
):
348 for b
in self
.builders
.values():
def getPerspective(self, slavename):
    """Look up the BotPerspective that tracks the named slave."""
    return self.slaves[slavename]
354 def shutdownSlaves(self
):
355 # TODO: make this into a bot method rather than a builder method
356 for b
in self
.slaves
.values():
def stopService(self):
    """Stamp a 'master shutdown' event on every builder's status page,
    then proceed with the normal service shutdown."""
    for builder in self.builders.values():
        bstat = builder.builder_status
        bstat.addPointEvent(["master", "shutdown"])
        bstat.saveYourself()
    return service.Service.stopService(self)
def getLockByID(self, lockid):
    """Convert a Lock identifier into an actual Lock instance.

    @param lockid: a locks.MasterLock or locks.SlaveLock instance
    @return: a locks.RealMasterLock or locks.RealSlaveLock instance
    """
    # (the closing quotes of the docstring above were missing as
    # presented, which swallowed the method body into the string)
    if not lockid in self.locks:
        self.locks[lockid] = lockid.lockClass(lockid)
    # if the master.cfg file has changed maxCount= on the lock, the next
    # time a build is started, they'll get a new RealLock instance. Note
    # that this requires that MasterLock and SlaveLock (marker) instances
    # be hashable and that they should compare properly.
    return self.locks[lockid]
378 ########################################
382 class DebugPerspective(NewCredPerspective
):
def attached(self, mind):
    """A debug client connected; hand back this perspective.

    Dispatcher.requestAvatar chains the result of attached() into
    _avatarAttached, so the perspective itself must be returned --
    as presented the method returned None.
    """
    return self
def detached(self, mind):
    """The debug client went away; nothing to clean up."""
    pass
388 def perspective_requestBuild(self
, buildername
, reason
, branch
, revision
):
389 c
= interfaces
.IControl(self
.master
)
390 bc
= c
.getBuilder(buildername
)
391 ss
= SourceStamp(branch
, revision
)
392 br
= BuildRequest(reason
, ss
, buildername
)
395 def perspective_pingBuilder(self
, buildername
):
396 c
= interfaces
.IControl(self
.master
)
397 bc
= c
.getBuilder(buildername
)
def perspective_fakeChange(self, file, revision=None, who="fakeUser",
                           branch=None):
    """Inject a fabricated Change, for exercising the schedulers.

    NOTE(review): the tail of the parameter list and of the body were
    missing as presented; 'branch=None' is implied by the use of
    'branch' below, and handing the Change to the master's IControl
    mirrors Control.addChange -- confirm.
    """
    change = Change(who, [file], "some fake comments\n",
                    branch=branch, revision=revision)
    c = interfaces.IControl(self.master)
    c.addChange(change)
def perspective_setCurrentState(self, buildername, state):
    """Force the named builder's status display into the given state
    (debug helper; no real build activity is involved)."""
    builder = self.botmaster.builders.get(buildername)
    if not builder: return
    if state == "offline":
        builder.statusbag.currentlyOffline()
    # the condition guarding currentlyIdle() was missing as presented;
    # "idle" is the state name that completes the pattern
    if state == "idle":
        builder.statusbag.currentlyIdle()
    if state == "waiting":
        builder.statusbag.currentlyWaiting(now()+10)
    if state == "building":
        builder.statusbag.currentlyBuilding(None)
def perspective_reload(self):
    """Debug hook: ask the master to re-read its config file."""
    print("doing reload of the config file")
    self.master.loadTheConfigFile()
421 def perspective_pokeIRC(self
):
422 print "saying something on IRC"
423 from buildbot
.status
import words
424 for s
in self
.master
:
425 if isinstance(s
, words
.IRC
):
427 for channel
in bot
.channels
:
428 print " channel", channel
429 bot
.p
.msg(channel
, "Ow, quit it")
def perspective_print(self, msg):
    """Debug hook: echo C{msg} on the master's stdout.

    NOTE(review): the body was missing as presented; printing the
    message is the obvious intent of the name -- confirm the exact
    original output format.
    """
    print("debug %s" % msg)
434 class Dispatcher(styles
.Versioned
):
436 implements(portal
.IRealm
)
438 __implements__
= portal
.IRealm
,
439 persistenceVersion
= 2
def upgradeToVersion1(self):
    """Persistence upgrade: derive .master from the botmaster's parent."""
    botmaster = self.botmaster
    self.master = botmaster.parent
def upgradeToVersion2(self):
    """Persistence upgrade to v2: older pickled instances lack the
    .names registry used by register()/unregister(); start it empty.
    (The body was missing as presented.)"""
    self.names = {}
def register(self, name, afactory):
    """Expose C{afactory} to inbound connections under C{name}."""
    self.names[name] = afactory
def unregister(self, name):
    """Stop exposing the avatar factory registered under C{name}.
    (The body -- removing the entry added by register() -- was
    missing as presented.)"""
    del self.names[name]
def requestAvatar(self, avatarID, mind, interface):
    """IRealm hook: map an authenticated avatarID to a perspective.

    The if/else skeleton and final return were missing as presented;
    they are reconstructed from the surviving elif chain.
    """
    assert interface == pb.IPerspective
    afactory = self.names.get(avatarID)
    if afactory:
        p = afactory.getPerspective()
    elif avatarID == "debug":
        p = DebugPerspective()
        p.master = self.master
        p.botmaster = self.botmaster
    elif avatarID == "statusClient":
        p = self.statusClientService.getPerspective()
    else:
        # it must be one of the buildslaves: no other names will make it
        # past the checker
        p = self.botmaster.getPerspective(avatarID)
    if not p:
        raise ValueError("no perspective for '%s'" % avatarID)
    d = defer.maybeDeferred(p.attached, mind)
    d.addCallback(self._avatarAttached, mind)
    return d
def _avatarAttached(self, p, mind):
    """Package an attached perspective into the (interface, avatar,
    logout-callable) triple that twisted.cred expects."""
    def logout(p=p, mind=mind):
        return p.detached(mind)
    return (pb.IPerspective, p, logout)
480 ########################################
486 # all IChangeSource objects
487 # StatusClientService
488 # TCPClient(self.ircFactory)
489 # TCPServer(self.slaveFactory) -> dispatcher.requestAvatar
490 # TCPServer(self.site)
491 # UNIXServer(ResourcePublisher(self.site))
494 class BuildMaster(service
.MultiService
, styles
.Versioned
):
496 persistenceVersion
= 3
499 projectName
= "(unspecified)"
504 def __init__(self
, basedir
, configFileName
="master.cfg"):
505 service
.MultiService
.__init
__(self
)
506 self
.setName("buildmaster")
507 self
.basedir
= basedir
508 self
.configFileName
= configFileName
510 # the dispatcher is the realm in which all inbound connections are
511 # looked up: slave builders, change notifications, status clients, and
513 dispatcher
= Dispatcher()
514 dispatcher
.master
= self
515 self
.dispatcher
= dispatcher
516 self
.checker
= checkers
.InMemoryUsernamePasswordDatabaseDontUse()
517 # the checker starts with no user/passwd pairs: they are added later
518 p
= portal
.Portal(dispatcher
)
519 p
.registerChecker(self
.checker
)
520 self
.slaveFactory
= pb
.PBServerFactory(p
)
521 self
.slaveFactory
.unsafeTracebacks
= True # let them see exceptions
523 self
.slavePortnum
= None
524 self
.slavePort
= None
526 self
.botmaster
= BotMaster()
527 self
.botmaster
.setName("botmaster")
528 self
.botmaster
.setServiceParent(self
)
529 dispatcher
.botmaster
= self
.botmaster
531 self
.status
= Status(self
.botmaster
, self
.basedir
)
533 self
.statusTargets
= []
536 # this ChangeMaster is a dummy, only used by tests. In the real
537 # buildmaster, where the BuildMaster instance is activated
538 # (startService is called) by twistd, this attribute is overwritten.
539 self
.useChanges(ChangeMaster())
541 self
.readConfig
= False
def upgradeToVersion1(self):
    """Persistence upgrade: recover the Dispatcher realm from the
    slave factory's portal."""
    root = self.slaveFactory.root
    self.dispatcher = root.portal.realm
def upgradeToVersion2(self): # post-0.4.3
    """Persistence upgrade: rename the web-port attributes and pin the
    config file name."""
    self.webServer = self.webTCPPort
    # NOTE(review): removal of the old attribute names was missing as
    # presented; deleting them completes the rename -- confirm.
    del self.webTCPPort
    self.webDistribServer = self.webUNIXPort
    del self.webUNIXPort
    self.configFileName = "master.cfg"
def upgradeToVersion3(self):
    """Persistence upgrade for masters pickled before 0.6.4."""
    # post 0.6.3, solely to deal with the 0.6.3 breakage. Starting with
    # 0.6.5 I intend to do away with .tap files altogether
    self.namedServices = {}
def startService(self):
    """Bring the master up: restore saved changes, load the config file
    on first start, hook SIGHUP for reloads, and stamp each builder's
    status page with a 'master started' event."""
    service.MultiService.startService(self)
    self.loadChanges() # must be done before loading the config file
    if not self.readConfig:
        # TODO: consider catching exceptions during this call to
        # loadTheConfigFile and bailing (reactor.stop) if it fails,
        # since without a config file we can't do anything except reload
        # the config file, and it would be nice for the user to discover
        self.loadTheConfigFile()
    if signal and hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, self._handleSIGHUP)
    for builder in self.botmaster.builders.values():
        bstat = builder.builder_status
        bstat.addPointEvent(["master", "started"])
        bstat.saveYourself()
def useChanges(self, changes):
    """Install C{changes} as the ChangeMaster service, replacing (and
    detaching) any previous one."""
    # __init__ installs a dummy ChangeMaster through this method before
    # any change_svc exists, so guard the hand-off to the old service
    # (as presented the first call would raise AttributeError)
    if getattr(self, "change_svc", None):
        # TODO: can return a Deferred
        self.change_svc.disownServiceParent()
    self.change_svc = changes
    self.change_svc.basedir = self.basedir
    self.change_svc.setName("changemaster")
    self.dispatcher.changemaster = self.change_svc
    self.change_svc.setServiceParent(self)
def loadChanges(self):
    """Restore the pickled ChangeMaster from basedir/changes.pck,
    falling back to a fresh one if the file is absent or unreadable."""
    filename = os.path.join(self.basedir, "changes.pck")
    # the try/except structure was missing as presented; the two log
    # messages identify the intended IOError / corrupt-pickle fallbacks
    try:
        changes = pickle.load(open(filename, "rb"))
    except IOError:
        log.msg("changes.pck missing, using new one")
        changes = ChangeMaster()
    except:
        log.msg("corrupted changes.pck, using new one")
        changes = ChangeMaster()
    self.useChanges(changes)
def _handleSIGHUP(self, *args):
    """SIGHUP handler: schedule a config-file reload via the reactor."""
    reactor.callLater(0, self.loadTheConfigFile)
604 @rtype: L{buildbot.status.builder.Status}
def loadTheConfigFile(self, configFile=None):
    """Open the master config file (default: basedir/configFileName)
    and feed it to loadConfig, logging rather than raising on failure.

    NOTE(review): the default-argument guard and the try/except
    structure were missing as presented; they are reconstructed from
    the surviving log messages -- confirm.
    """
    if not configFile:
        configFile = os.path.join(self.basedir, self.configFileName)
    log.msg("loading configuration from %s" % configFile)
    configFile = os.path.expanduser(configFile)
    try:
        f = open(configFile, "r")
    except IOError:
        log.msg("unable to open config file '%s'" % configFile)
        log.msg("leaving old configuration in place")
        return
    try:
        self.loadConfig(f)
    except:
        log.msg("error during loadConfig")
        log.err()
630 def loadConfig(self
, f
):
631 """Internal function to load a specific configuration file. Any
632 errors in the file will be signalled by raising an exception.
634 @return: a Deferred that will fire (with None) when the configuration
635 changes have been completed. This may involve a round-trip to each
636 buildslave that was involved."""
638 localDict
= {'basedir': os
.path
.expanduser(self
.basedir
)}
642 log
.msg("error while parsing config file")
646 config
= localDict
['BuildmasterConfig']
648 log
.err("missing config dictionary")
649 log
.err("config file must define BuildmasterConfig")
652 known_keys
= "bots sources schedulers builders slavePortnum " + \
653 "debugPassword manhole " + \
654 "status projectName projectURL buildbotURL"
655 known_keys
= known_keys
.split()
656 for k
in config
.keys():
657 if k
not in known_keys
:
658 log
.msg("unknown key '%s' defined in config dictionary" % k
)
662 bots
= config
['bots']
663 sources
= config
['sources']
664 schedulers
= config
['schedulers']
665 builders
= config
['builders']
666 slavePortnum
= config
['slavePortnum']
669 debugPassword
= config
.get('debugPassword')
670 manhole
= config
.get('manhole')
671 status
= config
.get('status', [])
672 projectName
= config
.get('projectName')
673 projectURL
= config
.get('projectURL')
674 buildbotURL
= config
.get('buildbotURL')
677 log
.msg("config dictionary is missing a required parameter")
678 log
.msg("leaving old configuration in place")
681 # do some validation first
682 for name
, passwd
in bots
:
683 if name
in ("debug", "change", "status"):
684 raise KeyError, "reserved name '%s' used for a bot" % name
685 if config
.has_key('interlocks'):
686 raise KeyError("c['interlocks'] is no longer accepted")
688 assert isinstance(sources
, (list, tuple))
690 assert interfaces
.IChangeSource(s
, None)
691 # this assertion catches c['schedulers'] = Scheduler(), since
692 # Schedulers are service.MultiServices and thus iterable.
693 errmsg
= "c['schedulers'] must be a list of Scheduler instances"
694 assert isinstance(schedulers
, (list, tuple)), errmsg
696 assert interfaces
.IScheduler(s
, None), errmsg
697 assert isinstance(status
, (list, tuple))
699 assert interfaces
.IStatusReceiver(s
, None)
701 slavenames
= [name
for name
,pw
in bots
]
706 raise ValueError("builder %s must be defined with a dict, "
707 "not a tuple" % b
[0])
708 if b
.has_key('slavename') and b
['slavename'] not in slavenames
:
709 raise ValueError("builder %s uses undefined slave %s" \
710 % (b
['name'], b
['slavename']))
711 for n
in b
.get('slavenames', []):
712 if n
not in slavenames
:
713 raise ValueError("builder %s uses undefined slave %s" \
715 if b
['name'] in buildernames
:
716 raise ValueError("duplicate builder name %s"
718 buildernames
.append(b
['name'])
719 if b
['builddir'] in dirnames
:
720 raise ValueError("builder %s reuses builddir %s"
721 % (b
['name'], b
['builddir']))
722 dirnames
.append(b
['builddir'])
726 for b
in s
.listBuilderNames():
727 assert b
in buildernames
, \
728 "%s uses unknown builder %s" % (s
, b
)
729 if s
.name
in schedulernames
:
730 # TODO: schedulers share a namespace with other Service
731 # children of the BuildMaster node, like status plugins, the
732 # Manhole, the ChangeMaster, and the BotMaster (although most
733 # of these don't have names)
734 msg
= ("Schedulers must have unique names, but "
735 "'%s' was a duplicate" % (s
.name
,))
736 raise ValueError(msg
)
737 schedulernames
.append(s
.name
)
739 # assert that all locks used by the Builds and their Steps are
743 for l
in b
.get('locks', []):
744 if locks
.has_key(l
.name
):
745 if locks
[l
.name
] is not l
:
746 raise ValueError("Two different locks (%s and %s) "
748 % (l
, locks
[l
.name
], l
.name
))
751 # TODO: this will break with any BuildFactory that doesn't use a
752 # .steps list, but I think the verification step is more
754 for s
in b
['factory'].steps
:
755 for l
in s
[1].get('locks', []):
756 if locks
.has_key(l
.name
):
757 if locks
[l
.name
] is not l
:
758 raise ValueError("Two different locks (%s and %s)"
760 % (l
, locks
[l
.name
], l
.name
))
764 # slavePortnum supposed to be a strports specification
765 if type(slavePortnum
) is int:
766 slavePortnum
= "tcp:%d" % slavePortnum
768 # now we're committed to implementing the new configuration, so do
770 # TODO: actually, this is spread across a couple of Deferreds, so it
771 # really isn't atomic.
773 d
= defer
.succeed(None)
775 self
.projectName
= projectName
776 self
.projectURL
= projectURL
777 self
.buildbotURL
= buildbotURL
779 # self.bots: Disconnect any that were attached and removed from the
780 # list. Update self.checker with the new list of passwords,
781 # including debug/change/status.
782 d
.addCallback(lambda res
: self
.loadConfig_Slaves(bots
))
786 self
.checker
.addUser("debug", debugPassword
)
787 self
.debugPassword
= debugPassword
790 if manhole
!= self
.manhole
:
793 # disownServiceParent may return a Deferred
794 d
.addCallback(lambda res
: self
.manhole
.disownServiceParent())
798 d
.addCallback(_remove
)
801 self
.manhole
= manhole
802 manhole
.setServiceParent(self
)
805 # add/remove self.botmaster.builders to match builders. The
806 # botmaster will handle startup/shutdown issues.
807 d
.addCallback(lambda res
: self
.loadConfig_Builders(builders
))
809 d
.addCallback(lambda res
: self
.loadConfig_status(status
))
811 # Schedulers are added after Builders in case they start right away
812 d
.addCallback(lambda res
: self
.loadConfig_Schedulers(schedulers
))
813 # and Sources go after Schedulers for the same reason
814 d
.addCallback(lambda res
: self
.loadConfig_Sources(sources
))
817 if self
.slavePortnum
!= slavePortnum
:
819 def closeSlavePort(res
):
820 d1
= self
.slavePort
.disownServiceParent()
821 self
.slavePort
= None
823 d
.addCallback(closeSlavePort
)
824 if slavePortnum
is not None:
825 def openSlavePort(res
):
826 self
.slavePort
= strports
.service(slavePortnum
,
828 self
.slavePort
.setServiceParent(self
)
829 d
.addCallback(openSlavePort
)
830 log
.msg("BuildMaster listening on port %s" % slavePortnum
)
831 self
.slavePortnum
= slavePortnum
833 log
.msg("configuration update started")
835 self
.readConfig
= True
836 log
.msg("configuration update complete")
838 d
.addCallback(lambda res
: self
.botmaster
.maybeStartAllBuilds())
def loadConfig_Slaves(self, bots):
    """Update the checker and the BotMaster to match the new list of
    (name, password) bot pairs, hanging up on removed slaves.

    @return: a DeferredList that fires once the removed slaves have
    been disconnected.
    """
    # set up the Checker with the names and passwords of all valid bots
    self.checker.users = {} # violates abstraction, oh well
    for user, passwd in bots:
        self.checker.addUser(user, passwd)
    self.checker.addUser("change", "changepw")

    # identify new/old bots
    old = self.bots; oldnames = [name for name,pw in old]
    new = bots; newnames = [name for name,pw in new]
    # removeSlave will hang up on the old bot
    dl = [self.botmaster.removeSlave(name)
          for name in oldnames if name not in newnames]
    # use a plain loop (not a side-effect comprehension) for the adds
    for name in newnames:
        if name not in oldnames:
            self.botmaster.addSlave(name)
    # remember the new list so the next reload can diff against it;
    # as presented self.bots was read above but never updated
    self.bots = bots
    return defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
def loadConfig_Sources(self, sources):
    """Reconcile the running IChangeSources with the new config:
    remove deleted sources, then (after they shut down) add new ones.

    @return: a Deferred that fires when the reconfiguration is done.
    """
    log.msg("loadConfig_Sources, change_svc is", self.change_svc,
            self.change_svc.parent)
    # shut down any that were removed, start any that were added
    deleted_sources = [s for s in self.change_svc if s not in sources]
    added_sources = [s for s in sources if s not in self.change_svc]
    dl = [self.change_svc.removeSource(s) for s in deleted_sources]
    # the 'def addNewOnes(res):' header was missing as presented; its
    # body (the addSource loop) survived and is re-attached here
    def addNewOnes(res):
        for s in added_sources:
            self.change_svc.addSource(s)
    d = defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
    d.addCallback(addNewOnes)
    return d
def allSchedulers(self):
    """Return the child services that provide IScheduler."""
    # TODO: when twisted-1.3 compatibility is dropped, switch to the
    # providedBy form, because it's faster (no actual adapter lookup)
    schedulers = []
    for child in self:
        #if interfaces.IScheduler.providedBy(child)]
        if interfaces.IScheduler(child, None):
            schedulers.append(child)
    return schedulers
def loadConfig_Schedulers(self, newschedulers):
    """Reconcile the running Schedulers with the new config: detach
    the removed ones, then attach the added ones.

    @return: a Deferred that fires when the reconfiguration is done.
    """
    oldschedulers = self.allSchedulers()
    removed = [s for s in oldschedulers if s not in newschedulers]
    added = [s for s in newschedulers if s not in oldschedulers]
    dl = [defer.maybeDeferred(s.disownServiceParent) for s in removed]
    # the 'def addNewOnes(res):' header and its loop were missing as
    # presented; only the setServiceParent call survived
    def addNewOnes(res):
        for s in added:
            s.setServiceParent(self)
    d = defer.DeferredList(dl, fireOnOneErrback=1)
    d.addCallback(addNewOnes)
    return d
894 def loadConfig_Builders(self
, newBuilderData
):
895 somethingChanged
= False
898 allBuilders
= self
.botmaster
.builders
.copy()
899 for data
in newBuilderData
:
902 newBuilderNames
.append(name
)
904 # identify all that were removed
905 for oldname
in self
.botmaster
.getBuildernames():
906 if oldname
not in newList
:
907 log
.msg("removing old builder %s" % oldname
)
908 del allBuilders
[oldname
]
909 somethingChanged
= True
910 # announce the change
911 self
.status
.builderRemoved(oldname
)
913 # everything in newList is either unchanged, changed, or new
914 for name
, data
in newList
.items():
915 old
= self
.botmaster
.builders
.get(name
)
916 basedir
= data
['builddir'] # used on both master and slave
917 #name, slave, builddir, factory = data
919 # category added after 0.6.2
920 category
= data
.get('category', None)
921 log
.msg("adding new builder %s for category %s" %
923 statusbag
= self
.status
.builderAdded(name
, basedir
, category
)
924 builder
= Builder(data
, statusbag
)
925 allBuilders
[name
] = builder
926 somethingChanged
= True
927 elif old
.compareToSetup(data
):
928 # changed: try to minimize the disruption and only modify the
929 # pieces that really changed
930 diffs
= old
.compareToSetup(data
)
931 log
.msg("updating builder %s: %s" % (name
, "\n".join(diffs
)))
933 statusbag
= old
.builder_status
934 statusbag
.saveYourself() # seems like a good idea
935 # TODO: if the basedir was changed, we probably need to make
937 new_builder
= Builder(data
, statusbag
)
938 new_builder
.consumeTheSoulOfYourPredecessor(old
)
939 # that migrates any retained slavebuilders too
941 # point out that the builder was updated. On the Waterfall,
942 # this will appear just after any currently-running builds.
943 statusbag
.addPointEvent(["config", "updated"])
945 allBuilders
[name
] = new_builder
946 somethingChanged
= True
948 # unchanged: leave it alone
949 log
.msg("builder %s is unchanged" % name
)
953 sortedAllBuilders
= [allBuilders
[name
] for name
in newBuilderNames
]
954 d
= self
.botmaster
.setBuilders(sortedAllBuilders
)
def loadConfig_status(self, status):
    """Reconcile the attached IStatusReceivers with the new config:
    detach targets no longer wanted, then add the new ones.

    NOTE(review): the dl accumulator, both membership conditions and
    the 'def addNewOnes(res):' header were missing as presented; they
    are reconstructed from the sibling loadConfig_* methods -- confirm.

    @return: a Deferred that fires when the reconfiguration is done.
    """
    dl = []
    for s in self.statusTargets[:]:
        if not s in status:
            log.msg("removing IStatusReceiver", s)
            d = defer.maybeDeferred(s.disownServiceParent)
            dl.append(d)
            self.statusTargets.remove(s)
    # after those are finished going away, add new ones
    def addNewOnes(res):
        for s in status:
            if not s in self.statusTargets:
                log.msg("adding IStatusReceiver", s)
                s.setServiceParent(self)
                self.statusTargets.append(s)
    d = defer.DeferredList(dl, fireOnOneErrback=1)
    d.addCallback(addNewOnes)
    return d
def addChange(self, change):
    """Distribute a new Change to every Scheduler."""
    for s in self.allSchedulers():
        # NOTE(review): the loop body was missing as presented;
        # forwarding the change to each scheduler is implied by this
        # method's name and by Control.addChange -- confirm the
        # Scheduler API name.
        s.addChange(change)
def submitBuildSet(self, bs):
    """Start a BuildSet on the Builders it names.

    @raise KeyError: if bs names a builder that is not configured

    NOTE(review): the accumulator, the per-name guards and the call
    that actually starts the BuildSet were missing as presented and
    are reconstructed -- confirm the BuildSet API name ('start').
    """
    # determine the set of Builders to use
    builders = []
    for name in bs.builderNames:
        b = self.botmaster.builders.get(name)
        if b:
            if b not in builders:
                builders.append(b)
            continue
        # TODO: add aliases like 'all'
        raise KeyError("no such builder named '%s'" % name)
    # now tell the BuildSet to create BuildRequests for all those
    # Builders and submit them
    bs.start(builders)
    self.status.buildsetSubmitted(bs.status)
1004 implements(interfaces
.IControl
)
1006 __implements__
= interfaces
.IControl
,
def __init__(self, master):
    """Wrap C{master} so the IControl operations can be exposed."""
    self.master = master
def addChange(self, change):
    """Hand C{change} to the master's ChangeMaster service."""
    self.master.change_svc.addChange(change)
def submitBuildSet(self, bs):
    """Forward a BuildSet to the master for scheduling."""
    self.master.submitBuildSet(bs)
def getBuilder(self, name):
    """Return an IBuilderControl wrapper around the named Builder."""
    builder = self.master.botmaster.builders[name]
    return interfaces.IBuilderControl(builder)
# let IControl(master) hand back a Control wrapper for any BuildMaster
components.registerAdapter(Control, BuildMaster, interfaces.IControl)
1023 # so anybody who can get a handle on the BuildMaster can cause a build with:
1024 # IControl(master).getBuilder("full-2.3").requestBuild(buildrequest)