rename c['bots'] to c['slaves'], and use buildbot.slave.BuildSlave instances instead...
[buildbot.git] / buildbot / master.py
blob d5697181ea7a3d2628d41f35236668c214dc9009
# -*- test-case-name: buildbot.test.test_run -*-

import string, os
signal = None
try:
    import signal
except ImportError:
    pass
try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle
import warnings

from zope.interface import implements
from twisted.python import log, components
from twisted.internet import defer, reactor
from twisted.spread import pb
from twisted.cred import portal, checkers
from twisted.application import service, strports
from twisted.persisted import styles

# sibling imports
from buildbot.util import now
from buildbot.pbutil import NewCredPerspective
from buildbot.process.builder import Builder, IDLE
from buildbot.process.base import BuildRequest
from buildbot.status.builder import SlaveStatus, Status
from buildbot.changes.changes import Change, ChangeMaster
from buildbot.sourcestamp import SourceStamp
from buildbot import interfaces
from buildbot.slave import BuildSlave

########################################

class BotPerspective(NewCredPerspective):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['slaves'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .service attribute."""

    def __init__(self, name, botmaster):
        self.slavename = name
        self.botmaster = botmaster
        self.slave_status = SlaveStatus(name)
        self.slave = None # a RemoteReference to the Bot, when connected
        self.slave_commands = None

    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.slave:
            return self.sendBuilderList()
        return defer.succeed(None)

    def __repr__(self):
        builders = self.botmaster.getBuildersForSlave(self.slavename)
        return "<BotPerspective '%s', current builders: %s>" % \
               (self.slavename,
                string.join(map(lambda b: b.name, builders), ','))

    def attached(self, bot):
        """This is called when the slave connects.

        @return: a Deferred that fires with a suitable pb.IPerspective to
        give to the slave (i.e. 'self')"""

        if self.slave:
            # uh-oh, we've got a duplicate slave. The most likely
            # explanation is that the slave is behind a slow link, thinks we
            # went away, and has attempted to reconnect, so we've got two
            # "connections" from the same slave, but the previous one is
            # stale. Give the new one precedence.
            log.msg("duplicate slave %s replacing old one" % self.slavename)

            # just in case we've got two identically-configured slaves,
            # report the IP addresses of both so someone can resolve the
            # squabble
            tport = self.slave.broker.transport
            log.msg("old slave was connected from", tport.getPeer())
            log.msg("new slave is from", bot.broker.transport.getPeer())
            d = self.disconnect()
        else:
            d = defer.succeed(None)
        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # we accumulate slave information in this 'state' dictionary, then
        # set it atomically if we make it far enough through the process
        state = {}

        def _log_attachment_on_slave(res):
            d1 = bot.callRemote("print", "attached")
            d1.addErrback(lambda why: None)
            return d1
        d.addCallback(_log_attachment_on_slave)

        def _get_info(res):
            d1 = bot.callRemote("getSlaveInfo")
            def _got_info(info):
                log.msg("Got slaveinfo from '%s'" % self.slavename)
                # TODO: info{} might have other keys
                state["admin"] = info.get("admin")
                state["host"] = info.get("host")
            def _info_unavailable(why):
                # maybe an old slave, doesn't implement remote_getSlaveInfo
                log.msg("BotPerspective.info_unavailable")
                log.err(why)
            d1.addCallbacks(_got_info, _info_unavailable)
            return d1
        d.addCallback(_get_info)

        def _get_commands(res):
            d1 = bot.callRemote("getCommands")
            def _got_commands(commands):
                state["slave_commands"] = commands
            def _commands_unavailable(why):
                # probably an old slave
                log.msg("BotPerspective._commands_unavailable")
                if why.check(AttributeError):
                    return
                log.err(why)
            d1.addCallbacks(_got_commands, _commands_unavailable)
            return d1
        d.addCallback(_get_commands)

        def _accept_slave(res):
            self.slave_status.setAdmin(state.get("admin"))
            self.slave_status.setHost(state.get("host"))
            self.slave_status.setConnected(True)
            self.slave_commands = state.get("slave_commands")
            self.slave = bot
            log.msg("bot attached")
            return self.updateSlave()
        d.addCallback(_accept_slave)

        # Finally, the slave gets a reference to this BotPerspective. They
        # receive this later, after we've started using them.
        d.addCallback(lambda res: self)
        return d

    def detached(self, mind):
        self.slave = None
        self.slave_status.setConnected(False)
        self.botmaster.slaveLost(self)
        log.msg("BotPerspective.detached(%s)" % self.slavename)

    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """

        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)

        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback with one argument, the
        # RemoteReference being disconnected.
        def _disconnected(rref):
            reactor.callLater(0, d.callback, None)
        self.slave.notifyOnDisconnect(_disconnected)
        tport = self.slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            #tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
        except:
            # however, these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
        log.msg("waiting for slave to finish disconnecting")

        # When this Deferred fires, we'll be ready to accept the new slave
        return d

    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
        blist = [(b.name, b.builddir) for b in our_builders]
        d = self.slave.callRemote("setBuilderList", blist)
        def _sent(slist):
            dl = []
            for name, remote in slist.items():
                # use get() since we might have changed our mind since then
                b = self.botmaster.builders.get(name)
                if b:
                    d1 = b.attached(self, remote, self.slave_commands)
                    dl.append(d1)
            return defer.DeferredList(dl)
        def _set_failed(why):
            log.msg("BotPerspective.sendBuilderList (%s) failed" % self)
            log.err(why)
            # TODO: hang up on them? Without setBuilderList we can't use
            # them.
        d.addCallbacks(_sent, _set_failed)
        return d

    def perspective_keepalive(self):
        pass


class BotMaster(service.Service):

    """This is the master-side service which manages remote buildbot slaves.
    It provides them with BotPerspectives, and distributes file change
    notification messages to them.
    """

    debug = 0

    def __init__(self):
        self.builders = {}
        self.builderNames = []
        # builders maps Builder names to instances of bb.p.builder.Builder,
        # which is the master-side object that defines and controls a build.
        # They are added by calling botmaster.addBuilder() from the startup
        # code.

        # self.slaves contains a ready BotPerspective instance for each
        # potential buildslave, i.e. all the ones listed in the config file.
        # If the slave is connected, self.slaves[slavename].slave will
        # contain a RemoteReference to their Bot instance. If it is not
        # connected, that attribute will hold None.
        self.slaves = {} # maps slavename to BotPerspective
        self.statusClientService = None
        self.watchers = {}

        # self.locks holds the real Lock instances
        self.locks = {}

    # these four are convenience functions for testing

    def waitUntilBuilderAttached(self, name):
        b = self.builders[name]
        #if b.slaves:
        #    return defer.succeed(None)
        d = defer.Deferred()
        b.watchers['attach'].append(d)
        return d

    def waitUntilBuilderDetached(self, name):
        b = self.builders.get(name)
        if not b or not b.slaves:
            return defer.succeed(None)
        d = defer.Deferred()
        b.watchers['detach'].append(d)
        return d

    def waitUntilBuilderFullyDetached(self, name):
        b = self.builders.get(name)
        # TODO: this looks too deeply inside the Builder object
        if not b or not b.slaves:
            return defer.succeed(None)
        d = defer.Deferred()
        b.watchers['detach_all'].append(d)
        return d

    def waitUntilBuilderIdle(self, name):
        b = self.builders[name]
        # TODO: this looks way too deeply inside the Builder object
        for sb in b.slaves:
            if sb.state != IDLE:
                d = defer.Deferred()
                b.watchers['idle'].append(d)
                return d
        return defer.succeed(None)

    def addSlave(self, slavename):
        slave = BotPerspective(slavename, self)
        self.slaves[slavename] = slave

    def removeSlave(self, slavename):
        d = self.slaves[slavename].disconnect()
        del self.slaves[slavename]
        return d

    def slaveLost(self, bot):
        for name, b in self.builders.items():
            if bot.slavename in b.slavenames:
                b.detached(bot)

    def getBuildersForSlave(self, slavename):
        return [b
                for b in self.builders.values()
                if slavename in b.slavenames]

    def getBuildernames(self):
        return self.builderNames

    def getBuilders(self):
        allBuilders = [self.builders[name] for name in self.builderNames]
        return allBuilders

    def setBuilders(self, builders):
        self.builders = {}
        self.builderNames = []
        for b in builders:
            for slavename in b.slavenames:
                # this is actually validated earlier
                assert slavename in self.slaves
            self.builders[b.name] = b
            self.builderNames.append(b.name)
            b.setBotmaster(self)
        d = self._updateAllSlaves()
        return d

    def _updateAllSlaves(self):
        """Notify all buildslaves about changes in their Builders."""
        dl = [s.updateSlave() for s in self.slaves.values()]
        return defer.DeferredList(dl)

    def maybeStartAllBuilds(self):
        for b in self.builders.values():
            b.maybeStartBuild()

    def getPerspective(self, slavename):
        return self.slaves[slavename]

    def shutdownSlaves(self):
        # TODO: make this into a bot method rather than a builder method
        for b in self.slaves.values():
            b.shutdownSlave()

    def stopService(self):
        for b in self.builders.values():
            b.builder_status.addPointEvent(["master", "shutdown"])
            b.builder_status.saveYourself()
        return service.Service.stopService(self)

    def getLockByID(self, lockid):
        """Convert a Lock identifier into an actual Lock instance.
        @param lockid: a locks.MasterLock or locks.SlaveLock instance
        @return: a locks.RealMasterLock or locks.RealSlaveLock instance
        """
        if not lockid in self.locks:
            self.locks[lockid] = lockid.lockClass(lockid)
        # if the master.cfg file has changed maxCount= on the lock, the next
        # time a build is started, they'll get a new RealLock instance. Note
        # that this requires that MasterLock and SlaveLock (marker) instances
        # be hashable and that they should compare properly.
        return self.locks[lockid]
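
    # Illustrative master.cfg fragment (a sketch, not part of this module;
    # the lock name, builder dict, and factory 'f' are made up). Because the
    # same MasterLock instance is shared by reference, both builders map to
    # a single RealMasterLock here:
    #
    #   from buildbot import locks
    #   compile_lock = locks.MasterLock("compile")
    #   c['builders'] = [
    #     {'name': "full", 'slavename': "bot1", 'builddir': "full",
    #      'factory': f, 'locks': [compile_lock]},
    #   ]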

########################################

class DebugPerspective(NewCredPerspective):
    def attached(self, mind):
        return self
    def detached(self, mind):
        pass

    def perspective_requestBuild(self, buildername, reason, branch, revision):
        c = interfaces.IControl(self.master)
        bc = c.getBuilder(buildername)
        ss = SourceStamp(branch, revision)
        br = BuildRequest(reason, ss, buildername)
        bc.requestBuild(br)

    def perspective_pingBuilder(self, buildername):
        c = interfaces.IControl(self.master)
        bc = c.getBuilder(buildername)
        bc.ping()

    def perspective_fakeChange(self, file, revision=None, who="fakeUser",
                               branch=None):
        change = Change(who, [file], "some fake comments\n",
                        branch=branch, revision=revision)
        c = interfaces.IControl(self.master)
        c.addChange(change)

    def perspective_setCurrentState(self, buildername, state):
        builder = self.botmaster.builders.get(buildername)
        if not builder: return
        if state == "offline":
            builder.statusbag.currentlyOffline()
        if state == "idle":
            builder.statusbag.currentlyIdle()
        if state == "waiting":
            builder.statusbag.currentlyWaiting(now()+10)
        if state == "building":
            builder.statusbag.currentlyBuilding(None)
    def perspective_reload(self):
        print "doing reload of the config file"
        self.master.loadTheConfigFile()
    def perspective_pokeIRC(self):
        print "saying something on IRC"
        from buildbot.status import words
        for s in self.master:
            if isinstance(s, words.IRC):
                bot = s.f
                for channel in bot.channels:
                    print " channel", channel
                    bot.p.msg(channel, "Ow, quit it")

    def perspective_print(self, msg):
        print "debug", msg

class Dispatcher(styles.Versioned):
    implements(portal.IRealm)
    persistenceVersion = 2

    def __init__(self):
        self.names = {}

    def upgradeToVersion1(self):
        self.master = self.botmaster.parent
    def upgradeToVersion2(self):
        self.names = {}

    def register(self, name, afactory):
        self.names[name] = afactory
    def unregister(self, name):
        del self.names[name]

    def requestAvatar(self, avatarID, mind, interface):
        assert interface == pb.IPerspective
        afactory = self.names.get(avatarID)
        if afactory:
            p = afactory.getPerspective()
        elif avatarID == "debug":
            p = DebugPerspective()
            p.master = self.master
            p.botmaster = self.botmaster
        elif avatarID == "statusClient":
            p = self.statusClientService.getPerspective()
        else:
            # it must be one of the buildslaves: no other names will make it
            # past the checker
            p = self.botmaster.getPerspective(avatarID)

        if not p:
            raise ValueError("no perspective for '%s'" % avatarID)

        d = defer.maybeDeferred(p.attached, mind)
        d.addCallback(self._avatarAttached, mind)
        return d

    def _avatarAttached(self, p, mind):
        return (pb.IPerspective, p, lambda p=p,mind=mind: p.detached(mind))
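
    # A rough sketch of how requestAvatar routes avatarIDs (the slave name
    # 'bot1' is hypothetical, not part of this code):
    #   avatarID "bot1"         -> BotPerspective for the buildslave 'bot1'
    #   avatarID "debug"        -> DebugPerspective (needs c['debugPassword'])
    #   avatarID "statusClient" -> the StatusClientService's perspective
    # Any other name is rejected earlier, by the checker.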

########################################

# service hierarchy:
#  BuildMaster
#   BotMaster
#   ChangeMaster
#    all IChangeSource objects
#   StatusClientService
#   TCPClient(self.ircFactory)
#   TCPServer(self.slaveFactory) -> dispatcher.requestAvatar
#   TCPServer(self.site)
#   UNIXServer(ResourcePublisher(self.site))

class BuildMaster(service.MultiService, styles.Versioned):
    debug = 0
    persistenceVersion = 3
    manhole = None
    debugPassword = None
    projectName = "(unspecified)"
    projectURL = None
    buildbotURL = None
    change_svc = None

    def __init__(self, basedir, configFileName="master.cfg"):
        service.MultiService.__init__(self)
        self.setName("buildmaster")
        self.basedir = basedir
        self.configFileName = configFileName

        # the dispatcher is the realm in which all inbound connections are
        # looked up: slave builders, change notifications, status clients, and
        # the debug port
        dispatcher = Dispatcher()
        dispatcher.master = self
        self.dispatcher = dispatcher
        self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        # the checker starts with no user/passwd pairs: they are added later
        p = portal.Portal(dispatcher)
        p.registerChecker(self.checker)
        self.slaveFactory = pb.PBServerFactory(p)
        self.slaveFactory.unsafeTracebacks = True # let them see exceptions

        self.slavePortnum = None
        self.slavePort = None

        self.botmaster = BotMaster()
        self.botmaster.setName("botmaster")
        self.botmaster.setServiceParent(self)
        dispatcher.botmaster = self.botmaster

        self.status = Status(self.botmaster, self.basedir)

        self.statusTargets = []

        self.slaves = []
        # this ChangeMaster is a dummy, only used by tests. In the real
        # buildmaster, where the BuildMaster instance is activated
        # (startService is called) by twistd, this attribute is overwritten.
        self.useChanges(ChangeMaster())

        self.readConfig = False

    def upgradeToVersion1(self):
        self.dispatcher = self.slaveFactory.root.portal.realm

    def upgradeToVersion2(self): # post-0.4.3
        self.webServer = self.webTCPPort
        del self.webTCPPort
        self.webDistribServer = self.webUNIXPort
        del self.webUNIXPort
        self.configFileName = "master.cfg"

    def upgradeToVersion3(self):
        # post 0.6.3, solely to deal with the 0.6.3 breakage. Starting with
        # 0.6.5 I intend to do away with .tap files altogether
        self.services = []
        self.namedServices = {}
        del self.change_svc

    def startService(self):
        service.MultiService.startService(self)
        self.loadChanges() # must be done before loading the config file
        if not self.readConfig:
            # TODO: consider catching exceptions during this call to
            # loadTheConfigFile and bailing (reactor.stop) if it fails,
            # since without a config file we can't do anything except reload
            # the config file, and it would be nice for the user to discover
            # this quickly.
            self.loadTheConfigFile()
        if signal and hasattr(signal, "SIGHUP"):
            signal.signal(signal.SIGHUP, self._handleSIGHUP)
        for b in self.botmaster.builders.values():
            b.builder_status.addPointEvent(["master", "started"])
            b.builder_status.saveYourself()

    def useChanges(self, changes):
        if self.change_svc:
            # TODO: can return a Deferred
            self.change_svc.disownServiceParent()
        self.change_svc = changes
        self.change_svc.basedir = self.basedir
        self.change_svc.setName("changemaster")
        self.dispatcher.changemaster = self.change_svc
        self.change_svc.setServiceParent(self)

    def loadChanges(self):
        filename = os.path.join(self.basedir, "changes.pck")
        try:
            changes = pickle.load(open(filename, "rb"))
            styles.doUpgrade()
        except IOError:
            log.msg("changes.pck missing, using new one")
            changes = ChangeMaster()
        except EOFError:
            log.msg("corrupted changes.pck, using new one")
            changes = ChangeMaster()
        self.useChanges(changes)

    def _handleSIGHUP(self, *args):
        reactor.callLater(0, self.loadTheConfigFile)

    def getStatus(self):
        """
        @rtype: L{buildbot.status.builder.Status}
        """
        return self.status

    def loadTheConfigFile(self, configFile=None):
        if not configFile:
            configFile = os.path.join(self.basedir, self.configFileName)

        log.msg("loading configuration from %s" % configFile)
        configFile = os.path.expanduser(configFile)

        try:
            f = open(configFile, "r")
        except IOError, e:
            log.msg("unable to open config file '%s'" % configFile)
            log.msg("leaving old configuration in place")
            log.err(e)
            return

        try:
            self.loadConfig(f)
        except:
            log.msg("error during loadConfig")
            log.err()
            log.msg("The new config file is unusable, so I'll ignore it.")
            log.msg("I will keep using the previous config file instead.")
        f.close()

    def loadConfig(self, f):
        """Internal function to load a specific configuration file. Any
        errors in the file will be signalled by raising an exception.

        @return: a Deferred that will fire (with None) when the configuration
        changes have been completed. This may involve a round-trip to each
        buildslave that was involved."""

        localDict = {'basedir': os.path.expanduser(self.basedir)}
        try:
            exec f in localDict
        except:
            log.msg("error while parsing config file")
            raise

        try:
            config = localDict['BuildmasterConfig']
        except KeyError:
            log.err("missing config dictionary")
            log.err("config file must define BuildmasterConfig")
            raise

        known_keys = ("bots", "slaves", "sources", "schedulers", "builders",
                      "slavePortnum", "debugPassword", "manhole",
                      "status", "projectName", "projectURL", "buildbotURL",
                      )
        for k in config.keys():
            if k not in known_keys:
                log.msg("unknown key '%s' defined in config dictionary" % k)

        try:
            # required
            sources = config['sources']
            schedulers = config['schedulers']
            builders = config['builders']
            slavePortnum = config['slavePortnum']
            #slaves = config['slaves']

            # optional
            debugPassword = config.get('debugPassword')
            manhole = config.get('manhole')
            status = config.get('status', [])
            projectName = config.get('projectName')
            projectURL = config.get('projectURL')
            buildbotURL = config.get('buildbotURL')

        except KeyError, e:
            log.msg("config dictionary is missing a required parameter")
            log.msg("leaving old configuration in place")
            raise

        #if "bots" in config:
        #    raise KeyError("c['bots'] is no longer accepted")

        slaves = config.get('slaves', [])
        if "bots" in config:
            m = ("c['bots'] is deprecated as of 0.7.6, please use "
                 "c['slaves'] instead")
            log.msg(m)
            warnings.warn(m, DeprecationWarning)
            for name, passwd in config['bots']:
                slaves.append(BuildSlave(name, passwd))

        if "bots" not in config and "slaves" not in config:
            log.msg("config dictionary must have either 'bots' or 'slaves'")
            log.msg("leaving old configuration in place")
            raise KeyError("must have either 'bots' or 'slaves'")
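
        # For example (the name and password are illustrative), an old
        # config written as:
        #   c['bots'] = [("bot1", "sekrit")]
        # is now spelled:
        #   from buildbot.slave import BuildSlave
        #   c['slaves'] = [BuildSlave("bot1", "sekrit")]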

        # do some validation first
        for s in slaves:
            assert isinstance(s, BuildSlave)
            if s.name in ("debug", "change", "status"):
                raise KeyError, "reserved name '%s' used for a bot" % s.name
        if config.has_key('interlocks'):
            raise KeyError("c['interlocks'] is no longer accepted")

        assert isinstance(sources, (list, tuple))
        for s in sources:
            assert interfaces.IChangeSource(s, None)
        # this assertion catches c['schedulers'] = Scheduler(), since
        # Schedulers are service.MultiServices and thus iterable.
        errmsg = "c['schedulers'] must be a list of Scheduler instances"
        assert isinstance(schedulers, (list, tuple)), errmsg
        for s in schedulers:
            assert interfaces.IScheduler(s, None), errmsg
        assert isinstance(status, (list, tuple))
        for s in status:
            assert interfaces.IStatusReceiver(s, None)

        slavenames = [s.name for s in slaves]
        buildernames = []
        dirnames = []
        for b in builders:
            if type(b) is tuple:
                raise ValueError("builder %s must be defined with a dict, "
                                 "not a tuple" % b[0])
            if b.has_key('slavename') and b['slavename'] not in slavenames:
                raise ValueError("builder %s uses undefined slave %s" \
                                 % (b['name'], b['slavename']))
            for n in b.get('slavenames', []):
                if n not in slavenames:
                    raise ValueError("builder %s uses undefined slave %s" \
                                     % (b['name'], n))
            if b['name'] in buildernames:
                raise ValueError("duplicate builder name %s"
                                 % b['name'])
            buildernames.append(b['name'])
            if b['builddir'] in dirnames:
                raise ValueError("builder %s reuses builddir %s"
                                 % (b['name'], b['builddir']))
            dirnames.append(b['builddir'])

        unscheduled_buildernames = buildernames[:]
        schedulernames = []
        for s in schedulers:
            for b in s.listBuilderNames():
                assert b in buildernames, \
                       "%s uses unknown builder %s" % (s, b)
                if b in unscheduled_buildernames:
                    unscheduled_buildernames.remove(b)

            if s.name in schedulernames:
                # TODO: schedulers share a namespace with other Service
                # children of the BuildMaster node, like status plugins, the
                # Manhole, the ChangeMaster, and the BotMaster (although most
                # of these don't have names)
                msg = ("Schedulers must have unique names, but "
                       "'%s' was a duplicate" % (s.name,))
                raise ValueError(msg)
            schedulernames.append(s.name)

        if unscheduled_buildernames:
            log.msg("Warning: some Builders have no Schedulers to drive them:"
                    " %s" % (unscheduled_buildernames,))

        # assert that all locks used by the Builds and their Steps are
        # uniquely named.
        locks = {}
        for b in builders:
            for l in b.get('locks', []):
                if locks.has_key(l.name):
                    if locks[l.name] is not l:
                        raise ValueError("Two different locks (%s and %s) "
                                         "share the name %s"
                                         % (l, locks[l.name], l.name))
                else:
                    locks[l.name] = l
            # TODO: this will break with any BuildFactory that doesn't use a
            # .steps list, but I think the verification step is more
            # important.
            for s in b['factory'].steps:
                for l in s[1].get('locks', []):
                    if locks.has_key(l.name):
                        if locks[l.name] is not l:
                            raise ValueError("Two different locks (%s and %s)"
                                             " share the name %s"
                                             % (l, locks[l.name], l.name))
                    else:
                        locks[l.name] = l

        # slavePortnum is supposed to be a strports specification
        if type(slavePortnum) is int:
            slavePortnum = "tcp:%d" % slavePortnum
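        # (For illustration: an integer 9989 becomes the strports string
        # "tcp:9989"; a config file may also supply a full specification,
        # such as "tcp:9989:interface=127.0.0.1", directly.)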

        # now we're committed to implementing the new configuration, so do
        # it atomically
        # TODO: actually, this is spread across a couple of Deferreds, so it
        # really isn't atomic.

        d = defer.succeed(None)

        self.projectName = projectName
        self.projectURL = projectURL
        self.buildbotURL = buildbotURL

        # self.slaves: Disconnect any that were attached and removed from the
        # list. Update self.checker with the new list of passwords, including
        # debug/change/status.
        d.addCallback(lambda res: self.loadConfig_Slaves(slaves))

        # self.debugPassword
        if debugPassword:
            self.checker.addUser("debug", debugPassword)
            self.debugPassword = debugPassword

        # self.manhole
        if manhole != self.manhole:
            # changing
            if self.manhole:
                # disownServiceParent may return a Deferred
                d.addCallback(lambda res: self.manhole.disownServiceParent())
                def _remove(res):
                    self.manhole = None
                    return res
                d.addCallback(_remove)
            if manhole:
                def _add(res):
                    self.manhole = manhole
                    manhole.setServiceParent(self)
                d.addCallback(_add)

        # add/remove self.botmaster.builders to match builders. The
        # botmaster will handle startup/shutdown issues.
        d.addCallback(lambda res: self.loadConfig_Builders(builders))

        d.addCallback(lambda res: self.loadConfig_status(status))

        # Schedulers are added after Builders in case they start right away
        d.addCallback(lambda res: self.loadConfig_Schedulers(schedulers))
        # and Sources go after Schedulers for the same reason
        d.addCallback(lambda res: self.loadConfig_Sources(sources))

        # self.slavePort
        if self.slavePortnum != slavePortnum:
            if self.slavePort:
                def closeSlavePort(res):
                    d1 = self.slavePort.disownServiceParent()
                    self.slavePort = None
                    return d1
                d.addCallback(closeSlavePort)
            if slavePortnum is not None:
                def openSlavePort(res):
                    self.slavePort = strports.service(slavePortnum,
                                                      self.slaveFactory)
                    self.slavePort.setServiceParent(self)
                d.addCallback(openSlavePort)
                log.msg("BuildMaster listening on port %s" % slavePortnum)
            self.slavePortnum = slavePortnum

        log.msg("configuration update started")
        def _done(res):
            self.readConfig = True
            log.msg("configuration update complete")
        d.addCallback(_done)
        d.addCallback(lambda res: self.botmaster.maybeStartAllBuilds())
        return d

    def loadConfig_Slaves(self, slaves):
        # set up the Checker with the names and passwords of all valid bots
        self.checker.users = {} # violates abstraction, oh well
        for s in slaves:
            self.checker.addUser(s.name, s.password)
        self.checker.addUser("change", "changepw")

        # identify new/old bots
        old = []; new = []
        for s in slaves:
            if s not in self.slaves:
                new.append(s)
        for s in self.slaves:
            if s not in slaves:
                old.append(s)
        # removeSlave will hang up on the old bot
        dl = [self.botmaster.removeSlave(s.name) for s in old]
        d = defer.DeferredList(dl, fireOnOneErrback=True)
        def _add(res):
            for s in new:
                self.botmaster.addSlave(s.name)
            self.slaves = slaves
        d.addCallback(_add)
        return d

    def loadConfig_Sources(self, sources):
        log.msg("loadConfig_Sources, change_svc is", self.change_svc,
                self.change_svc.parent)
        # shut down any that were removed, start any that were added
        deleted_sources = [s for s in self.change_svc if s not in sources]
        added_sources = [s for s in sources if s not in self.change_svc]
        dl = [self.change_svc.removeSource(s) for s in deleted_sources]
        def addNewOnes(res):
            [self.change_svc.addSource(s) for s in added_sources]
        d = defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
        d.addCallback(addNewOnes)
        return d

    def allSchedulers(self):
        return [child for child in self
                if interfaces.IScheduler.providedBy(child)]

    def loadConfig_Schedulers(self, newschedulers):
        oldschedulers = self.allSchedulers()
        removed = [s for s in oldschedulers if s not in newschedulers]
        added = [s for s in newschedulers if s not in oldschedulers]
        dl = [defer.maybeDeferred(s.disownServiceParent) for s in removed]
        def addNewOnes(res):
            for s in added:
                s.setServiceParent(self)
        d = defer.DeferredList(dl, fireOnOneErrback=1)
        d.addCallback(addNewOnes)
        return d

    def loadConfig_Builders(self, newBuilderData):
        somethingChanged = False
        newList = {}
        newBuilderNames = []
        allBuilders = self.botmaster.builders.copy()
        for data in newBuilderData:
            name = data['name']
            newList[name] = data
            newBuilderNames.append(name)

        # identify all that were removed
        for oldname in self.botmaster.getBuildernames():
            if oldname not in newList:
                log.msg("removing old builder %s" % oldname)
                del allBuilders[oldname]
                somethingChanged = True
                # announce the change
                self.status.builderRemoved(oldname)

        # everything in newList is either unchanged, changed, or new
        for name, data in newList.items():
            old = self.botmaster.builders.get(name)
            basedir = data['builddir'] # used on both master and slave
            #name, slave, builddir, factory = data
            if not old: # new
                # category added after 0.6.2
                category = data.get('category', None)
                log.msg("adding new builder %s for category %s" %
                        (name, category))
                statusbag = self.status.builderAdded(name, basedir, category)
                builder = Builder(data, statusbag)
                allBuilders[name] = builder
                somethingChanged = True
            elif old.compareToSetup(data):
                # changed: try to minimize the disruption and only modify the
                # pieces that really changed
                diffs = old.compareToSetup(data)
                log.msg("updating builder %s: %s" % (name, "\n".join(diffs)))

                statusbag = old.builder_status
                statusbag.saveYourself() # seems like a good idea
                # TODO: if the basedir was changed, we probably need to make
                # a new statusbag
                new_builder = Builder(data, statusbag)
                new_builder.consumeTheSoulOfYourPredecessor(old)
                # that migrates any retained slavebuilders too

                # point out that the builder was updated. On the Waterfall,
                # this will appear just after any currently-running builds.
                statusbag.addPointEvent(["config", "updated"])

                allBuilders[name] = new_builder
                somethingChanged = True
            else:
                # unchanged: leave it alone
                log.msg("builder %s is unchanged" % name)

        if somethingChanged:
            sortedAllBuilders = [allBuilders[name] for name in newBuilderNames]
            d = self.botmaster.setBuilders(sortedAllBuilders)
            return d
        return None

    def loadConfig_status(self, status):
        dl = []

        # remove old ones
        for s in self.statusTargets[:]:
            if not s in status:
                log.msg("removing IStatusReceiver", s)
                d = defer.maybeDeferred(s.disownServiceParent)
                dl.append(d)
                self.statusTargets.remove(s)
        # after those are finished going away, add new ones
        def addNewOnes(res):
            for s in status:
                if not s in self.statusTargets:
                    log.msg("adding IStatusReceiver", s)
                    s.setServiceParent(self)
                    self.statusTargets.append(s)
        d = defer.DeferredList(dl, fireOnOneErrback=1)
        d.addCallback(addNewOnes)
        return d

    def addChange(self, change):
        for s in self.allSchedulers():
            s.addChange(change)

    def submitBuildSet(self, bs):
        # determine the set of Builders to use
        builders = []
        for name in bs.builderNames:
            b = self.botmaster.builders.get(name)
            if b:
                if b not in builders:
                    builders.append(b)
                continue
            # TODO: add aliases like 'all'
            raise KeyError("no such builder named '%s'" % name)

        # now tell the BuildSet to create BuildRequests for all those
        # Builders and submit them
        bs.start(builders)
        self.status.buildsetSubmitted(bs.status)


class Control:
    implements(interfaces.IControl)

    def __init__(self, master):
        self.master = master

    def addChange(self, change):
        self.master.change_svc.addChange(change)

    def submitBuildSet(self, bs):
        self.master.submitBuildSet(bs)

    def getBuilder(self, name):
        b = self.master.botmaster.builders[name]
        return interfaces.IBuilderControl(b)

components.registerAdapter(Control, BuildMaster, interfaces.IControl)

# so anybody who can get a handle on the BuildMaster can cause a build with:
#  IControl(master).getBuilder("full-2.3").requestBuild(buildrequest)