# -*- test-case-name: buildbot.test.test_run -*-
# implement manhole (ssh-based, colorized/line-editing terminal)
# [buildbot.git] / buildbot / master.py
# blob 00b88d91fcc8264c2e7ac2f3c8aa376f5944842c
3 from __future__ import generators
4 import string, sys, os, time, warnings
5 try:
6 import signal
7 except ImportError:
8 signal = None
9 try:
10 import cPickle as pickle
11 except ImportError:
12 import pickle
14 from twisted.python import log, usage, components
15 from twisted.internet import defer, reactor
16 from twisted.spread import pb
17 from twisted.cred import portal, checkers
18 from twisted.application import service, strports
19 from twisted.persisted import styles
21 # sibling imports
22 from buildbot import util
23 from buildbot.twcompat import implements
24 from buildbot.util import now
25 from buildbot.pbutil import NewCredPerspective
26 from buildbot.process.builder import Builder, IDLE
27 from buildbot.status.builder import BuilderStatus, SlaveStatus, Status
28 from buildbot.changes.changes import Change, ChangeMaster
29 from buildbot import interfaces
31 ########################################
class BotPerspective(NewCredPerspective):
    """Master-side representative for a remote buildbot slave.

    There is exactly one for each slave described in the config file (the
    c['bots'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .service attribute.
    """

    # commands supported by the remote slave, as reported by getCommands;
    # None until the slave has attached
    slave_commands = None

    def __init__(self, name):
        self.slavename = name
        self.slave_status = SlaveStatus(name)
        self.builders = [] # list of b.p.builder.Builder instances
        self.slave = None # a RemoteReference to the Bot, when connected

    def addBuilder(self, builder):
        """Called to add a builder after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
                 accepted the new builder.
        """
        self.builders.append(builder)
        if self.slave:
            return self.sendBuilderList()
        return defer.succeed(None)

    def removeBuilder(self, builder):
        """Tell the slave that the given builder has been removed, allowing
        it to discard the associated L{buildbot.slave.bot.SlaveBuilder}
        object.

        @return: a Deferred that fires when the slave has finished removing
                 the SlaveBuilder
        """
        self.builders.remove(builder)
        if self.slave:
            builder.detached(self)
            return self.sendBuilderList()
        return defer.succeed(None)

    def __repr__(self):
        return "<BotPerspective '%s', builders: %s>" % \
               (self.slavename,
                ','.join([b.name for b in self.builders]))

    def attached(self, mind):
        """This is called when the slave connects.

        @return: a Deferred that fires with a suitable pb.IPerspective to
                 give to the slave (i.e. 'self')
        """
        if self.slave:
            # uh-oh, we've got a duplicate slave. The most likely
            # explanation is that the slave is behind a slow link, thinks we
            # went away, and has attempted to reconnect, so we've got two
            # "connections" from the same slave, but the previous one is
            # stale. Give the new one precedence.
            log.msg("duplicate slave %s replacing old one" % self.slavename)

            # just in case we've got two identically-configured slaves,
            # report the IP addresses of both so someone can resolve the
            # squabble
            tport = self.slave.broker.transport
            log.msg("old slave was connected from", tport.getPeer())
            log.msg("new slave is from", mind.broker.transport.getPeer())
            d = self.disconnect()
            d.addCallback(lambda res: self._attached(mind))
            return d

        return self._attached(mind)

    def disconnect(self):
        """Drop the current slave connection, if any.

        @return: a Deferred that fires once the old connection is fully
                 torn down and it is safe to attach a new slave.
        """
        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)

        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()
        # bind 'd' as a default argument so the callback carries its own
        # reference (resolves the old "TODO: d=d ?" question)
        self.slave.notifyOnDisconnect(lambda res, d=d:
                                      reactor.callLater(0, d.callback, None))
        tport = self.slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            #tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
        except Exception:
            # these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
        log.msg("waiting for slave to finish disconnecting")

        # When this Deferred fires, we'll be ready to accept the new slave
        return d

    def _attached(self, mind):
        """We go through a sequence of calls, gathering information, then
        tell our Builders that they have a slave to work with.

        @return: a Deferred that fires (with 'self') when our Builders are
                 prepared to deal with the slave.
        """
        self.slave = mind
        d = self.slave.callRemote("print", "attached")
        d.addErrback(lambda why: 0) # best-effort: ignore a failed "print"
        self.slave_status.connected = True
        log.msg("bot attached")

        # TODO: there is a window here (while we're retrieving slaveinfo)
        # during which a disconnect or a duplicate-slave will be confusing
        d.addCallback(lambda res: self.slave.callRemote("getSlaveInfo"))
        d.addCallbacks(self.got_info, self.infoUnavailable)
        d.addCallback(self._attached2)
        d.addCallback(lambda res: self)
        return d

    def got_info(self, info):
        log.msg("Got slaveinfo from '%s'" % self.slavename)
        # TODO: info{} might have other keys
        self.slave_status.admin = info.get("admin")
        self.slave_status.host = info.get("host")

    def infoUnavailable(self, why):
        # maybe an old slave, doesn't implement remote_getSlaveInfo
        log.msg("BotPerspective.infoUnavailable")
        log.err(why)

    def _attached2(self, res):
        d = self.slave.callRemote("getCommands")
        d.addCallback(self.got_commands)
        d.addErrback(self._commandsUnavailable)
        d.addCallback(self._attached3)
        return d

    def got_commands(self, commands):
        self.slave_commands = commands

    def _commandsUnavailable(self, why):
        # probably an old slave
        log.msg("BotPerspective._commandsUnavailable")
        if why.check(AttributeError):
            return
        log.err(why)

    def _attached3(self, res):
        d = self.slave.callRemote("getDirs")
        d.addCallback(self.got_dirs)
        d.addErrback(self._dirsFailed)
        d.addCallback(self._attached4)
        return d

    def got_dirs(self, dirs):
        # warn about build directories on the slave that no configured
        # builder uses any more ("info" is reserved for slave metadata)
        wanted = [b.builddir for b in self.builders]
        unwanted = [d for d in dirs if d not in wanted and d != "info"]
        if unwanted:
            log.msg("slave %s has leftover directories (%s): " % \
                    (self.slavename, ','.join(unwanted)) + \
                    "you can delete them now")

    def _dirsFailed(self, why):
        log.msg("BotPerspective._dirsFailed")
        log.err(why)

    def _attached4(self, res):
        return self.sendBuilderList()

    def sendBuilderList(self):
        # now make sure their list of Builders matches ours
        blist = []
        for b in self.builders:
            blist.append((b.name, b.builddir))
        d = self.slave.callRemote("setBuilderList", blist)
        d.addCallback(self.list_done)
        d.addErrback(self._listFailed)
        return d

    def list_done(self, blist):
        # this could come back at weird times. be prepared to handle oddness
        dl = []
        for name, remote in blist.items():
            for b in self.builders:
                if b.name == name:
                    # if we sent the builders list because of a config
                    # change, the Builder might already be attached.
                    # Builder.attached will ignore us if this happens.
                    d = b.attached(self, remote, self.slave_commands)
                    dl.append(d)
                    # builder names are unique, so stop scanning (the
                    # original 'continue' here was a no-op)
                    break
        return defer.DeferredList(dl)

    def _listFailed(self, why):
        log.msg("BotPerspective._listFailed")
        log.err(why)
        # TODO: hang up on them, without setBuilderList we can't use them

    def perspective_forceBuild(self, name, who=None):
        # slave admins are allowed to force any of their own builds
        for b in self.builders:
            if name == b.name:
                try:
                    b.forceBuild(who, "slave requested build")
                    return "ok, starting build"
                except interfaces.BuilderInUseError:
                    return "sorry, builder was in use"
                except interfaces.NoSlaveError:
                    return "sorry, there is no slave to run the build"
        else:
            log.msg("slave requested build for unknown builder '%s'" % name)
            return "sorry, invalid builder name"

    def perspective_keepalive(self):
        # no-op: merely receiving the message keeps the connection alive
        pass

    def detached(self, mind):
        # called when the slave's connection goes away
        self.slave = None
        self.slave_status.connected = False
        for b in self.builders:
            b.detached(self)
        log.msg("Botmaster.detached(%s)" % self.slavename)
275 class BotMaster(service.Service):
277 """This is the master-side service which manages remote buildbot slaves.
278 It provides them with BotPerspectives, and distributes file change
279 notification messages to them.
282 debug = 0
284 def __init__(self):
285 self.builders = {}
286 self.builderNames = []
287 # builders maps Builder names to instances of bb.p.builder.Builder,
288 # which is the master-side object that defines and controls a build.
289 # They are added by calling botmaster.addBuilder() from the startup
290 # code.
292 # self.slaves contains a ready BotPerspective instance for each
293 # potential buildslave, i.e. all the ones listed in the config file.
294 # If the slave is connected, self.slaves[slavename].slave will
295 # contain a RemoteReference to their Bot instance. If it is not
296 # connected, that attribute will hold None.
297 self.slaves = {} # maps slavename to BotPerspective
298 self.statusClientService = None
299 self.watchers = {}
301 # self.locks holds the real Lock instances
302 self.locks = {}
304 # these four are convenience functions for testing
306 def waitUntilBuilderAttached(self, name):
307 b = self.builders[name]
308 #if b.slaves:
309 # return defer.succeed(None)
310 d = defer.Deferred()
311 b.watchers['attach'].append(d)
312 return d
314 def waitUntilBuilderDetached(self, name):
315 b = self.builders.get(name)
316 if not b or not b.slaves:
317 return defer.succeed(None)
318 d = defer.Deferred()
319 b.watchers['detach'].append(d)
320 return d
322 def waitUntilBuilderFullyDetached(self, name):
323 b = self.builders.get(name)
324 # TODO: this looks too deeply inside the Builder object
325 if not b or not b.slaves:
326 return defer.succeed(None)
327 d = defer.Deferred()
328 b.watchers['detach_all'].append(d)
329 return d
331 def waitUntilBuilderIdle(self, name):
332 b = self.builders[name]
333 # TODO: this looks way too deeply inside the Builder object
334 for sb in b.slaves:
335 if sb.state != IDLE:
336 d = defer.Deferred()
337 b.watchers['idle'].append(d)
338 return d
339 return defer.succeed(None)
342 def addSlave(self, slavename):
343 slave = BotPerspective(slavename)
344 self.slaves[slavename] = slave
346 def removeSlave(self, slavename):
347 d = self.slaves[slavename].disconnect()
348 del self.slaves[slavename]
349 return d
351 def getBuildernames(self):
352 return self.builderNames
354 def addBuilder(self, builder):
355 """This is called by the setup code to define what builds should be
356 performed. Each Builder object has a build slave that should host
357 that build: the builds cannot be done until the right slave
358 connects.
360 @return: a Deferred that fires when an attached slave has accepted
361 the new builder.
364 if self.debug: print "addBuilder", builder
365 log.msg("Botmaster.addBuilder(%s)" % builder.name)
367 if builder.name in self.builderNames:
368 raise KeyError("muliply defined builder '%s'" % builder.name)
369 for slavename in builder.slavenames:
370 if not self.slaves.has_key(slavename):
371 raise KeyError("builder %s uses undefined slave %s" % \
372 (builder.name, slavename))
374 self.builders[builder.name] = builder
375 self.builderNames.append(builder.name)
376 builder.setBotmaster(self)
378 dl = [self.slaves[slavename].addBuilder(builder)
379 for slavename in builder.slavenames]
380 return defer.DeferredList(dl)
382 def removeBuilder(self, builder):
383 """Stop using a Builder.
384 This removes the Builder from the list of active Builders.
386 @return: a Deferred that fires when an attached slave has finished
387 removing the SlaveBuilder
389 if self.debug: print "removeBuilder", builder
390 log.msg("Botmaster.removeBuilder(%s)" % builder.name)
391 b = self.builders[builder.name]
392 del self.builders[builder.name]
393 self.builderNames.remove(builder.name)
394 for slavename in builder.slavenames:
395 slave = self.slaves.get(slavename)
396 if slave:
397 return slave.removeBuilder(builder)
398 return defer.succeed(None)
400 def getPerspective(self, slavename):
401 return self.slaves[slavename]
403 def shutdownSlaves(self):
404 # TODO: make this into a bot method rather than a builder method
405 for b in self.slaves.values():
406 b.shutdownSlave()
408 def stopService(self):
409 for b in self.builders.values():
410 b.builder_status.addPointEvent(["master", "shutdown"])
411 b.builder_status.saveYourself()
412 return service.Service.stopService(self)
414 def getLockByID(self, lockid):
415 """Convert a Lock identifier into an actual Lock instance.
416 @param lockid: a locks.MasterLock or locks.SlaveLock instance
417 @return: a locks.RealMasterLock or locks.RealSlaveLock instance
419 k = (lockid.__class__, lockid.name)
420 if not k in self.locks:
421 self.locks[k] = lockid.lockClass(lockid.name)
422 return self.locks[k]
424 ########################################
428 class DebugPerspective(NewCredPerspective):
429 def attached(self, mind):
430 return self
431 def detached(self, mind):
432 pass
434 def perspective_forceBuild(self, buildername, who=None):
435 c = interfaces.IControl(self.master)
436 bc = c.getBuilder(buildername)
437 bc.forceBuild(who, "debug tool 'Force Build' button pushed")
439 def perspective_fakeChange(self, file, revision=None, who="fakeUser",
440 branch=None):
441 change = Change(who, [file], "some fake comments\n",
442 branch=branch, revision=revision)
443 c = interfaces.IControl(self.master)
444 c.addChange(change)
446 def perspective_setCurrentState(self, buildername, state):
447 builder = self.botmaster.builders.get(buildername)
448 if not builder: return
449 if state == "offline":
450 builder.statusbag.currentlyOffline()
451 if state == "idle":
452 builder.statusbag.currentlyIdle()
453 if state == "waiting":
454 builder.statusbag.currentlyWaiting(now()+10)
455 if state == "building":
456 builder.statusbag.currentlyBuilding(None)
457 def perspective_reload(self):
458 print "doing reload of the config file"
459 self.master.loadTheConfigFile()
460 def perspective_pokeIRC(self):
461 print "saying something on IRC"
462 from buildbot.status import words
463 for s in self.master:
464 if isinstance(s, words.IRC):
465 bot = s.f
466 for channel in bot.channels:
467 print " channel", channel
468 bot.p.msg(channel, "Ow, quit it")
470 def perspective_print(self, msg):
471 print "debug", msg
class Dispatcher(styles.Versioned):
    """The cred Realm for the buildmaster: maps avatar names (buildslaves,
    'debug', 'statusClient', or registered factories) to perspectives."""

    if implements:
        implements(portal.IRealm)
    else:
        __implements__ = portal.IRealm,
    persistenceVersion = 2

    def __init__(self):
        self.names = {}

    def upgradeToVersion1(self):
        self.master = self.botmaster.parent

    def upgradeToVersion2(self):
        self.names = {}

    def register(self, name, afactory):
        self.names[name] = afactory

    def unregister(self, name):
        del self.names[name]

    def requestAvatar(self, avatarID, mind, interface):
        assert interface == pb.IPerspective
        factory = self.names.get(avatarID)
        if factory:
            persp = factory.getPerspective()
        elif avatarID == "debug":
            persp = DebugPerspective()
            persp.master = self.master
            persp.botmaster = self.botmaster
        elif avatarID == "statusClient":
            persp = self.statusClientService.getPerspective()
        else:
            # it must be one of the buildslaves: no other names will make it
            # past the checker
            persp = self.botmaster.getPerspective(avatarID)

        if not persp:
            raise ValueError("no perspective for '%s'" % avatarID)

        d = defer.maybeDeferred(persp.attached, mind)
        d.addCallback(self._avatarAttached, mind)
        return d

    def _avatarAttached(self, p, mind):
        # third element of the tuple is the logout callable
        return (pb.IPerspective, p, lambda p=p, mind=mind: p.detached(mind))
########################################

# service hierarchy:
#  BuildMaster
#   BotMaster
#   ChangeMaster
#    all IChangeSource objects
#   StatusClientService
#   TCPClient(self.ircFactory)
#   TCPServer(self.slaveFactory) -> dispatcher.requestAvatar
#   TCPServer(self.site)
#   UNIXServer(ResourcePublisher(self.site))
533 class BuildMaster(service.MultiService, styles.Versioned):
534 debug = 0
535 persistenceVersion = 3
536 manhole = None
537 debugPassword = None
538 projectName = "(unspecified)"
539 projectURL = None
540 buildbotURL = None
541 change_svc = None
543 def __init__(self, basedir, configFileName="master.cfg"):
544 service.MultiService.__init__(self)
545 self.setName("buildmaster")
546 self.basedir = basedir
547 self.configFileName = configFileName
549 # the dispatcher is the realm in which all inbound connections are
550 # looked up: slave builders, change notifications, status clients, and
551 # the debug port
552 dispatcher = Dispatcher()
553 dispatcher.master = self
554 self.dispatcher = dispatcher
555 self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
556 # the checker starts with no user/passwd pairs: they are added later
557 p = portal.Portal(dispatcher)
558 p.registerChecker(self.checker)
559 self.slaveFactory = pb.PBServerFactory(p)
560 self.slaveFactory.unsafeTracebacks = True # let them see exceptions
562 self.slavePortnum = None
563 self.slavePort = None
565 self.botmaster = BotMaster()
566 self.botmaster.setName("botmaster")
567 self.botmaster.setServiceParent(self)
568 dispatcher.botmaster = self.botmaster
570 self.status = Status(self.botmaster, self.basedir)
572 self.statusTargets = []
574 self.bots = []
575 # this ChangeMaster is a dummy, only used by tests. In the real
576 # buildmaster, where the BuildMaster instance is activated
577 # (startService is called) by twistd, this attribute is overwritten.
578 self.useChanges(ChangeMaster())
580 self.readConfig = False
582 def upgradeToVersion1(self):
583 self.dispatcher = self.slaveFactory.root.portal.realm
585 def upgradeToVersion2(self): # post-0.4.3
586 self.webServer = self.webTCPPort
587 del self.webTCPPort
588 self.webDistribServer = self.webUNIXPort
589 del self.webUNIXPort
590 self.configFileName = "master.cfg"
592 def upgradeToVersion3(self):
593 # post 0.6.3, solely to deal with the 0.6.3 breakage. Starting with
594 # 0.6.5 I intend to do away with .tap files altogether
595 self.services = []
596 self.namedServices = {}
597 del self.change_svc
599 def startService(self):
600 service.MultiService.startService(self)
601 self.loadChanges() # must be done before loading the config file
602 if not self.readConfig:
603 # TODO: consider catching exceptions during this call to
604 # loadTheConfigFile and bailing (reactor.stop) if it fails,
605 # since without a config file we can't do anything except reload
606 # the config file, and it would be nice for the user to discover
607 # this quickly.
608 self.loadTheConfigFile()
609 if signal and hasattr(signal, "SIGHUP"):
610 signal.signal(signal.SIGHUP, self._handleSIGHUP)
611 for b in self.botmaster.builders.values():
612 b.builder_status.addPointEvent(["master", "started"])
613 b.builder_status.saveYourself()
615 def useChanges(self, changes):
616 if self.change_svc:
617 # TODO: can return a Deferred
618 self.change_svc.disownServiceParent()
619 self.change_svc = changes
620 self.change_svc.basedir = self.basedir
621 self.change_svc.setName("changemaster")
622 self.dispatcher.changemaster = self.change_svc
623 self.change_svc.setServiceParent(self)
625 def loadChanges(self):
626 filename = os.path.join(self.basedir, "changes.pck")
627 try:
628 changes = pickle.load(open(filename, "rb"))
629 styles.doUpgrade()
630 except IOError:
631 log.msg("changes.pck missing, using new one")
632 changes = ChangeMaster()
633 except EOFError:
634 log.msg("corrupted changes.pck, using new one")
635 changes = ChangeMaster()
636 self.useChanges(changes)
638 def _handleSIGHUP(self, *args):
639 reactor.callLater(0, self.loadTheConfigFile)
641 def getStatus(self):
643 @rtype: L{buildbot.status.builder.Status}
645 return self.status
647 def loadTheConfigFile(self, configFile=None):
648 if not configFile:
649 configFile = os.path.join(self.basedir, self.configFileName)
651 log.msg("loading configuration from %s" % configFile)
652 configFile = os.path.expanduser(configFile)
654 try:
655 f = open(configFile, "r")
656 except IOError, e:
657 log.msg("unable to open config file '%s'" % configFile)
658 log.msg("leaving old configuration in place")
659 log.err(e)
660 return
662 try:
663 self.loadConfig(f)
664 except:
665 log.msg("error during loadConfig")
666 log.err()
667 f.close()
669 def loadConfig(self, f):
670 """Internal function to load a specific configuration file. Any
671 errors in the file will be signalled by raising an exception.
673 @return: a Deferred that will fire (with None) when the configuration
674 changes have been completed. This may involve a round-trip to each
675 buildslave that was involved."""
677 localDict = {'basedir': os.path.expanduser(self.basedir)}
678 try:
679 exec f in localDict
680 except:
681 log.msg("error while parsing config file")
682 raise
684 try:
685 config = localDict['BuildmasterConfig']
686 except KeyError:
687 log.err("missing config dictionary")
688 log.err("config file must define BuildmasterConfig")
689 raise
691 known_keys = "bots sources schedulers builders slavePortnum " + \
692 "debugPassword manhole " + \
693 "status projectName projectURL buildbotURL"
694 known_keys = known_keys.split()
695 for k in config.keys():
696 if k not in known_keys:
697 log.msg("unknown key '%s' defined in config dictionary" % k)
699 try:
700 # required
701 bots = config['bots']
702 sources = config['sources']
703 schedulers = config['schedulers']
704 builders = config['builders']
705 slavePortnum = config['slavePortnum']
707 # optional
708 debugPassword = config.get('debugPassword')
709 manhole = config.get('manhole')
710 status = config.get('status', [])
711 projectName = config.get('projectName')
712 projectURL = config.get('projectURL')
713 buildbotURL = config.get('buildbotURL')
715 except KeyError, e:
716 log.msg("config dictionary is missing a required parameter")
717 log.msg("leaving old configuration in place")
718 raise
720 # do some validation first
721 for name, passwd in bots:
722 if name in ("debug", "change", "status"):
723 raise KeyError, "reserved name '%s' used for a bot" % name
724 if config.has_key('interlocks'):
725 raise KeyError("c['interlocks'] is no longer accepted")
727 assert isinstance(sources, (list, tuple))
728 for s in sources:
729 assert interfaces.IChangeSource(s, None)
730 # this assertion catches c['schedulers'] = Scheduler(), since
731 # Schedulers are service.MultiServices and thus iterable.
732 assert isinstance(schedulers, (list, tuple))
733 for s in schedulers:
734 assert interfaces.IScheduler(s, None)
735 assert isinstance(status, (list, tuple))
736 for s in status:
737 assert interfaces.IStatusReceiver(s, None)
739 slavenames = [name for name,pw in bots]
740 buildernames = []
741 dirnames = []
742 for b in builders:
743 if type(b) is tuple:
744 raise ValueError("builder %s must be defined with a dict, "
745 "not a tuple" % b[0])
746 if b.has_key('slavename') and b['slavename'] not in slavenames:
747 raise ValueError("builder %s uses undefined slave %s" \
748 % (b['name'], b['slavename']))
749 for n in b.get('slavenames', []):
750 if n not in slavenames:
751 raise ValueError("builder %s uses undefined slave %s" \
752 % (b['name'], n))
753 if b['name'] in buildernames:
754 raise ValueError("duplicate builder name %s"
755 % b['name'])
756 buildernames.append(b['name'])
757 if b['builddir'] in dirnames:
758 raise ValueError("builder %s reuses builddir %s"
759 % (b['name'], b['builddir']))
760 dirnames.append(b['builddir'])
762 for s in schedulers:
763 for b in s.listBuilderNames():
764 assert b in buildernames, \
765 "%s uses unknown builder %s" % (s, b)
767 # assert that all locks used by the Builds and their Steps are
768 # uniquely named.
769 locks = {}
770 for b in builders:
771 for l in b.get('locks', []):
772 if locks.has_key(l.name):
773 if locks[l.name] is not l:
774 raise ValueError("Two different locks (%s and %s) "
775 "share the name %s"
776 % (l, locks[l.name], l.name))
777 else:
778 locks[l.name] = l
779 # TODO: this will break with any BuildFactory that doesn't use a
780 # .steps list, but I think the verification step is more
781 # important.
782 for s in b['factory'].steps:
783 for l in s[1].get('locks', []):
784 if locks.has_key(l.name):
785 if locks[l.name] is not l:
786 raise ValueError("Two different locks (%s and %s)"
787 " share the name %s"
788 % (l, locks[l.name], l.name))
789 else:
790 locks[l.name] = l
792 # slavePortnum supposed to be a strports specification
793 if type(slavePortnum) is int:
794 slavePortnum = "tcp:%d" % slavePortnum
796 # now we're committed to implementing the new configuration, so do
797 # it atomically
798 # TODO: actually, this is spread across a couple of Deferreds, so it
799 # really isn't atomic.
801 d = defer.succeed(None)
803 self.projectName = projectName
804 self.projectURL = projectURL
805 self.buildbotURL = buildbotURL
807 # self.bots: Disconnect any that were attached and removed from the
808 # list. Update self.checker with the new list of passwords,
809 # including debug/change/status.
810 d.addCallback(lambda res: self.loadConfig_Slaves(bots))
812 # self.debugPassword
813 if debugPassword:
814 self.checker.addUser("debug", debugPassword)
815 self.debugPassword = debugPassword
817 # self.manhole
818 if manhole != self.manhole:
819 # changing
820 if self.manhole:
821 # disownServiceParent may return a Deferred
822 d.addCallback(lambda res: self.manhole.disownServiceParent())
823 def _remove(res):
824 self.manhole = None
825 return res
826 d.addCallback(_remove)
827 if manhole:
828 def _add(res):
829 self.manhole = manhole
830 manhole.setServiceParent(self)
831 d.addCallback(_add)
833 # add/remove self.botmaster.builders to match builders. The
834 # botmaster will handle startup/shutdown issues.
835 d.addCallback(lambda res: self.loadConfig_Builders(builders))
837 d.addCallback(lambda res: self.loadConfig_status(status))
839 # Schedulers are added after Builders in case they start right away
840 d.addCallback(lambda res: self.loadConfig_Schedulers(schedulers))
841 # and Sources go after Schedulers for the same reason
842 d.addCallback(lambda res: self.loadConfig_Sources(sources))
844 # self.slavePort
845 if self.slavePortnum != slavePortnum:
846 if self.slavePort:
847 def closeSlavePort(res):
848 d1 = self.slavePort.disownServiceParent()
849 self.slavePort = None
850 return d1
851 d.addCallback(closeSlavePort)
852 if slavePortnum is not None:
853 def openSlavePort(res):
854 self.slavePort = strports.service(slavePortnum,
855 self.slaveFactory)
856 self.slavePort.setServiceParent(self)
857 d.addCallback(openSlavePort)
858 log.msg("BuildMaster listening on port %s" % slavePortnum)
859 self.slavePortnum = slavePortnum
861 log.msg("configuration update started")
862 d.addCallback(lambda res: log.msg("configuration update complete"))
863 self.readConfig = True # TODO: consider not setting this until the
864 # Deferred fires.
865 return d
867 def loadConfig_Slaves(self, bots):
868 # set up the Checker with the names and passwords of all valid bots
869 self.checker.users = {} # violates abstraction, oh well
870 for user, passwd in bots:
871 self.checker.addUser(user, passwd)
872 self.checker.addUser("change", "changepw")
874 # identify new/old bots
875 old = self.bots; oldnames = [name for name,pw in old]
876 new = bots; newnames = [name for name,pw in new]
877 # removeSlave will hang up on the old bot
878 dl = [self.botmaster.removeSlave(name)
879 for name in oldnames if name not in newnames]
880 [self.botmaster.addSlave(name)
881 for name in newnames if name not in oldnames]
883 # all done
884 self.bots = bots
885 return defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
887 def loadConfig_Sources(self, sources):
888 log.msg("loadConfig_Sources, change_svc is", self.change_svc,
889 self.change_svc.parent)
890 # shut down any that were removed, start any that were added
891 deleted_sources = [s for s in self.change_svc if s not in sources]
892 added_sources = [s for s in sources if s not in self.change_svc]
893 dl = [self.change_svc.removeSource(s) for s in deleted_sources]
894 def addNewOnes(res):
895 [self.change_svc.addSource(s) for s in added_sources]
896 d = defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
897 d.addCallback(addNewOnes)
898 return d
900 def allSchedulers(self):
901 # TODO: when twisted-1.3 compatibility is dropped, switch to the
902 # providedBy form, because it's faster (no actual adapter lookup)
903 return [child for child in self
904 #if interfaces.IScheduler.providedBy(child)]
905 if interfaces.IScheduler(child, None)]
908 def loadConfig_Schedulers(self, newschedulers):
909 oldschedulers = self.allSchedulers()
910 removed = [s for s in oldschedulers if s not in newschedulers]
911 added = [s for s in newschedulers if s not in oldschedulers]
912 dl = [defer.maybeDeferred(s.disownServiceParent) for s in removed]
913 def addNewOnes(res):
914 for s in added:
915 s.setServiceParent(self)
916 d = defer.DeferredList(dl, fireOnOneErrback=1)
917 d.addCallback(addNewOnes)
918 return d
920 def loadConfig_Builders(self, newBuilders):
921 dl = []
922 old = self.botmaster.getBuildernames()
923 newNames = []
924 newList = {}
925 for data in newBuilders:
926 name = data['name']
927 newList[name] = data
928 newNames.append(name)
930 # identify all that were removed
931 for old in self.botmaster.builders.values()[:]:
932 if old.name not in newList.keys():
933 log.msg("removing old builder %s" % old.name)
934 d = self.botmaster.removeBuilder(old)
935 dl.append(d)
936 # announce the change
937 self.status.builderRemoved(old.name)
939 # everything in newList is either unchanged, changed, or new
940 for newName, data in newList.items():
941 old = self.botmaster.builders.get(newName)
942 name = data['name']
943 basedir = data['builddir'] # used on both master and slave
944 #name, slave, builddir, factory = data
945 if not old: # new
946 # category added after 0.6.2
947 category = data.get('category', None)
948 log.msg("adding new builder %s for category %s" %
949 (name, category))
950 statusbag = self.status.builderAdded(name, basedir, category)
951 builder = Builder(data, statusbag)
952 d = self.botmaster.addBuilder(builder)
953 dl.append(d)
954 else:
955 diffs = old.compareToSetup(data)
956 if not diffs: # unchanged: leave it alone
957 log.msg("builder %s is unchanged" % name)
958 pass
959 else:
960 # changed: remove and re-add. Don't touch the statusbag
961 # object: the clients won't see a remove/add cycle
962 log.msg("updating builder %s: %s" % (name,
963 "\n".join(diffs)))
964 # TODO: if the basedir was changed, we probably need to
965 # make a new statusbag
966 # TODO: if a slave is connected and we're re-using the
967 # same slave, try to avoid a disconnect/reconnect cycle.
968 statusbag = old.builder_status
969 statusbag.saveYourself() # seems like a good idea
970 d = self.botmaster.removeBuilder(old)
971 dl.append(d)
972 builder = Builder(data, statusbag)
973 # point out that the builder was updated
974 statusbag.addPointEvent(["config", "updated"])
975 d = self.botmaster.addBuilder(builder)
976 dl.append(d)
977 # now that everything is up-to-date, make sure the names are in the
978 # desired order
979 self.botmaster.builderNames = newNames
980 return defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
982 def loadConfig_status(self, status):
983 dl = []
985 # remove old ones
986 for s in self.statusTargets[:]:
987 if not s in status:
988 log.msg("removing IStatusReceiver", s)
989 d = defer.maybeDeferred(s.disownServiceParent)
990 dl.append(d)
991 self.statusTargets.remove(s)
992 # after those are finished going away, add new ones
993 def addNewOnes(res):
994 for s in status:
995 if not s in self.statusTargets:
996 log.msg("adding IStatusReceiver", s)
997 s.setServiceParent(self)
998 self.statusTargets.append(s)
999 d = defer.DeferredList(dl, fireOnOneErrback=1)
1000 d.addCallback(addNewOnes)
1001 return d
1004 def addChange(self, change):
1005 for s in self.allSchedulers():
1006 s.addChange(change)
1008 def submitBuildSet(self, bs):
1009 # determine the set of Builders to use
1010 builders = []
1011 for name in bs.builderNames:
1012 b = self.botmaster.builders.get(name)
1013 if b:
1014 if b not in builders:
1015 builders.append(b)
1016 continue
1017 # TODO: add aliases like 'all'
1018 raise KeyError("no such builder named '%s'" % name)
1020 # now tell the BuildSet to create BuildRequests for all those
1021 # Builders and submit them
1022 bs.start(builders)
1023 self.status.buildsetSubmitted(bs.status)
class Control:
    """IControl adapter for a BuildMaster: the narrow interface through
    which outsiders inject changes and submit/force builds."""

    if implements:
        implements(interfaces.IControl)
    else:
        __implements__ = interfaces.IControl,

    def __init__(self, master):
        self.master = master

    def addChange(self, change):
        # hand the change to the ChangeMaster service
        self.master.change_svc.addChange(change)

    def submitBuildSet(self, bs):
        self.master.submitBuildSet(bs)

    def getBuilder(self, name):
        builder = self.master.botmaster.builders[name]
        return interfaces.IBuilderControl(builder)
# let interfaces.IControl(master) adapt a BuildMaster to a Control
components.registerAdapter(Control, BuildMaster, interfaces.IControl)

# so anybody who can get a handle on the BuildMaster can force a build with:
#  IControl(master).getBuilder("full-2.3").forceBuild("me", "boredom")