# buildbot/master.py
# (source extracted from buildbot.git, blob
#  f0caa5d1c5878d83382ae0f373c88b3c48e441a0; commit "finally remove
#  forceBuild")
# -*- test-case-name: buildbot.test.test_run -*-

import string, os

# 'signal' is not available on every platform/build; leave it as None so
# callers can guard with "if signal:" (see BuildMaster.startService).
signal = None
try:
    import signal
except ImportError:
    pass
# prefer the C-accelerated pickle implementation when it is present
try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle
15 from twisted.python import log, components
16 from twisted.internet import defer, reactor
17 from twisted.spread import pb
18 from twisted.cred import portal, checkers
19 from twisted.application import service, strports
20 from twisted.persisted import styles
22 # sibling imports
23 from buildbot.twcompat import implements
24 from buildbot.util import now
25 from buildbot.pbutil import NewCredPerspective
26 from buildbot.process.builder import Builder, IDLE
27 from buildbot.process.base import BuildRequest
28 from buildbot.status.builder import SlaveStatus, Status
29 from buildbot.changes.changes import Change, ChangeMaster
30 from buildbot.sourcestamp import SourceStamp
31 from buildbot import interfaces
33 ########################################
class BotPerspective(NewCredPerspective):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['bots'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .service attribute."""

    def __init__(self, name, botmaster):
        # 'name' is the slavename from the master's config file
        self.slavename = name
        self.botmaster = botmaster
        self.slave_status = SlaveStatus(name)
        self.slave = None # a RemoteReference to the Bot, when connected
        self.slave_commands = None

    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.slave:
            return self.sendBuilderList()
        return defer.succeed(None)

    def __repr__(self):
        # NOTE(review): nothing in this class assigns self.builders, so this
        # repr would raise AttributeError unless some other code sets that
        # attribute -- verify against the rest of the project.
        return "<BotPerspective '%s', builders: %s>" % \
               (self.slavename,
                string.join(map(lambda b: b.name, self.builders), ','))

    def attached(self, bot):
        """This is called when the slave connects.

        @return: a Deferred that fires with a suitable pb.IPerspective to
        give to the slave (i.e. 'self')"""

        if self.slave:
            # uh-oh, we've got a duplicate slave. The most likely
            # explanation is that the slave is behind a slow link, thinks we
            # went away, and has attempted to reconnect, so we've got two
            # "connections" from the same slave, but the previous one is
            # stale. Give the new one precedence.
            log.msg("duplicate slave %s replacing old one" % self.slavename)

            # just in case we've got two identically-configured slaves,
            # report the IP addresses of both so someone can resolve the
            # squabble
            tport = self.slave.broker.transport
            log.msg("old slave was connected from", tport.getPeer())
            log.msg("new slave is from", bot.broker.transport.getPeer())
            d = self.disconnect()
        else:
            d = defer.succeed(None)
        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # we accumulate slave information in this 'state' dictionary, then
        # set it atomically if we make it far enough through the process
        state = {}

        def _log_attachment_on_slave(res):
            # "print" on the slave is purely informational; ignore failures
            d1 = bot.callRemote("print", "attached")
            d1.addErrback(lambda why: None)
            return d1
        d.addCallback(_log_attachment_on_slave)

        def _get_info(res):
            d1 = bot.callRemote("getSlaveInfo")
            def _got_info(info):
                log.msg("Got slaveinfo from '%s'" % self.slavename)
                # TODO: info{} might have other keys
                state["admin"] = info.get("admin")
                state["host"] = info.get("host")
            def _info_unavailable(why):
                # maybe an old slave, doesn't implement remote_getSlaveInfo
                log.msg("BotPerspective.info_unavailable")
                log.err(why)
            d1.addCallbacks(_got_info, _info_unavailable)
            return d1
        d.addCallback(_get_info)

        def _get_commands(res):
            d1 = bot.callRemote("getCommands")
            def _got_commands(commands):
                state["slave_commands"] = commands
            def _commands_unavailable(why):
                # probably an old slave
                log.msg("BotPerspective._commands_unavailable")
                if why.check(AttributeError):
                    return
                log.err(why)
            d1.addCallbacks(_got_commands, _commands_unavailable)
            return d1
        d.addCallback(_get_commands)

        def _accept_slave(res):
            # all the information-gathering succeeded: commit the gathered
            # state and notify interested Builders via updateSlave()
            self.slave_status.setAdmin(state.get("admin"))
            self.slave_status.setHost(state.get("host"))
            self.slave_status.setConnected(True)
            self.slave_commands = state.get("slave_commands")
            self.slave = bot
            log.msg("bot attached")
            return self.updateSlave()
        d.addCallback(_accept_slave)

        # Finally, the slave gets a reference to this BotPerspective. They
        # receive this later, after we've started using them.
        d.addCallback(lambda res: self)
        return d

    def detached(self, mind):
        # called when the PB connection to the slave is lost
        self.slave = None
        self.slave_status.setConnected(False)
        self.botmaster.slaveLost(self)
        log.msg("BotPerspective.detached(%s)" % self.slavename)

    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """
        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)

        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback with one argument, the
        # RemoteReference being disconnected.
        def _disconnected(rref):
            reactor.callLater(0, d.callback, None)
        self.slave.notifyOnDisconnect(_disconnected)
        tport = self.slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            #tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
            pass
        except:
            # however, these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
            pass
        log.msg("waiting for slave to finish disconnecting")

        # When this Deferred fires, we'll be ready to accept the new slave
        return d

    def sendBuilderList(self):
        # tell the connected slave which builders it should offer, then
        # attach each master-side Builder to the remote SlaveBuilder
        # references that come back
        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
        blist = [(b.name, b.builddir) for b in our_builders]
        d = self.slave.callRemote("setBuilderList", blist)
        def _sent(slist):
            dl = []
            for name, remote in slist.items():
                # use get() since we might have changed our mind since then
                b = self.botmaster.builders.get(name)
                if b:
                    d1 = b.attached(self, remote, self.slave_commands)
                    dl.append(d1)
            return defer.DeferredList(dl)
        def _set_failed(why):
            log.msg("BotPerspective.sendBuilderList (%s) failed" % self)
            log.err(why)
            # TODO: hang up on them?, without setBuilderList we can't use
            # them
        d.addCallbacks(_sent, _set_failed)
        return d

    def perspective_keepalive(self):
        # slaves ping this periodically just to keep the connection alive;
        # there is nothing to do
        pass
class BotMaster(service.Service):

    """This is the master-side service which manages remote buildbot slaves.
    It provides them with BotPerspectives, and distributes file change
    notification messages to them.
    """

    debug = 0

    def __init__(self):
        self.builders = {}
        self.builderNames = []
        # builders maps Builder names to instances of bb.p.builder.Builder,
        # which is the master-side object that defines and controls a build.
        # They are added by calling botmaster.addBuilder() from the startup
        # code.

        # self.slaves contains a ready BotPerspective instance for each
        # potential buildslave, i.e. all the ones listed in the config file.
        # If the slave is connected, self.slaves[slavename].slave will
        # contain a RemoteReference to their Bot instance. If it is not
        # connected, that attribute will hold None.
        self.slaves = {} # maps slavename to BotPerspective
        self.statusClientService = None
        self.watchers = {}

        # self.locks holds the real Lock instances
        self.locks = {}

    # these four are convenience functions for testing

    def waitUntilBuilderAttached(self, name):
        # returns a Deferred that fires when the named builder gets a slave
        b = self.builders[name]
        #if b.slaves:
        #    return defer.succeed(None)
        d = defer.Deferred()
        b.watchers['attach'].append(d)
        return d

    def waitUntilBuilderDetached(self, name):
        # fires when the named builder loses a slave (immediately if it has
        # none attached right now)
        b = self.builders.get(name)
        if not b or not b.slaves:
            return defer.succeed(None)
        d = defer.Deferred()
        b.watchers['detach'].append(d)
        return d

    def waitUntilBuilderFullyDetached(self, name):
        b = self.builders.get(name)
        # TODO: this looks too deeply inside the Builder object
        if not b or not b.slaves:
            return defer.succeed(None)
        d = defer.Deferred()
        b.watchers['detach_all'].append(d)
        return d

    def waitUntilBuilderIdle(self, name):
        b = self.builders[name]
        # TODO: this looks way too deeply inside the Builder object
        for sb in b.slaves:
            if sb.state != IDLE:
                d = defer.Deferred()
                b.watchers['idle'].append(d)
                return d
        return defer.succeed(None)

    def addSlave(self, slavename):
        # create the master-side representative for a config-file slave
        slave = BotPerspective(slavename, self)
        self.slaves[slavename] = slave

    def removeSlave(self, slavename):
        # hang up on the slave; returns a Deferred that fires when the
        # connection is gone
        d = self.slaves[slavename].disconnect()
        del self.slaves[slavename]
        return d

    def slaveLost(self, bot):
        # detach the lost slave from every Builder that was using it
        for name, b in self.builders.items():
            if bot.slavename in b.slavenames:
                b.detached(bot)

    def getBuildersForSlave(self, slavename):
        return [b
                for b in self.builders.values()
                if slavename in b.slavenames]

    def getBuildernames(self):
        return self.builderNames

    def getBuilders(self):
        # Builders in config-file order (builderNames preserves it)
        allBuilders = [self.builders[name] for name in self.builderNames]
        return allBuilders

    def setBuilders(self, builders):
        # replace the entire set of Builders, then notify all connected
        # slaves about the change. Returns a Deferred.
        self.builders = {}
        self.builderNames = []
        for b in builders:
            for slavename in b.slavenames:
                # this is actually validated earlier
                assert slavename in self.slaves
            self.builders[b.name] = b
            self.builderNames.append(b.name)
            b.setBotmaster(self)
        d = self._updateAllSlaves()
        return d

    def _updateAllSlaves(self):
        """Notify all buildslaves about changes in their Builders."""
        dl = [s.updateSlave() for s in self.slaves.values()]
        return defer.DeferredList(dl)

    def maybeStartAllBuilds(self):
        # give every Builder a chance to start a pending build
        for b in self.builders.values():
            b.maybeStartBuild()

    def getPerspective(self, slavename):
        # raises KeyError for unknown slaves
        return self.slaves[slavename]

    def shutdownSlaves(self):
        # TODO: make this into a bot method rather than a builder method
        # NOTE(review): BotPerspective does not define shutdownSlave() in
        # this file -- presumably defined elsewhere; verify before relying
        # on this method.
        for b in self.slaves.values():
            b.shutdownSlave()

    def stopService(self):
        # record the master shutdown in each builder's event log, then let
        # the base class stop the service
        for b in self.builders.values():
            b.builder_status.addPointEvent(["master", "shutdown"])
            b.builder_status.saveYourself()
        return service.Service.stopService(self)

    def getLockByID(self, lockid):
        """Convert a Lock identifier into an actual Lock instance.
        @param lockid: a locks.MasterLock or locks.SlaveLock instance
        @return: a locks.RealMasterLock or locks.RealSlaveLock instance
        """
        if not lockid in self.locks:
            self.locks[lockid] = lockid.lockClass(lockid)
        # if the master.cfg file has changed maxCount= on the lock, the next
        # time a build is started, they'll get a new RealLock instance. Note
        # that this requires that MasterLock and SlaveLock (marker) instances
        # be hashable and that they should compare properly.
        return self.locks[lockid]
378 ########################################
class DebugPerspective(NewCredPerspective):
    """Perspective handed to clients that log in with the 'debug' avatarID:
    lets them poke the buildmaster (request builds, inject fake changes,
    reload the config, etc). .master and .botmaster are assigned by
    Dispatcher.requestAvatar."""

    def attached(self, mind):
        return self
    def detached(self, mind):
        pass

    def perspective_requestBuild(self, buildername, reason, branch, revision):
        # ask the named Builder to run a build of the given source stamp
        c = interfaces.IControl(self.master)
        bc = c.getBuilder(buildername)
        ss = SourceStamp(branch, revision)
        br = BuildRequest(reason, ss, buildername)
        bc.requestBuild(br)

    def perspective_pingBuilder(self, buildername):
        c = interfaces.IControl(self.master)
        bc = c.getBuilder(buildername)
        bc.ping()

    def perspective_fakeChange(self, file, revision=None, who="fakeUser",
                               branch=None):
        # inject a synthetic Change, useful for exercising schedulers
        change = Change(who, [file], "some fake comments\n",
                        branch=branch, revision=revision)
        c = interfaces.IControl(self.master)
        c.addChange(change)

    def perspective_setCurrentState(self, buildername, state):
        # silently ignore unknown builders and unknown state names
        builder = self.botmaster.builders.get(buildername)
        if not builder: return
        if state == "offline":
            builder.statusbag.currentlyOffline()
        if state == "idle":
            builder.statusbag.currentlyIdle()
        if state == "waiting":
            builder.statusbag.currentlyWaiting(now()+10)
        if state == "building":
            builder.statusbag.currentlyBuilding(None)
    def perspective_reload(self):
        print "doing reload of the config file"
        self.master.loadTheConfigFile()
    def perspective_pokeIRC(self):
        # say something on every channel of every attached IRC status bot
        print "saying something on IRC"
        from buildbot.status import words
        for s in self.master:
            if isinstance(s, words.IRC):
                bot = s.f
                for channel in bot.channels:
                    print " channel", channel
                    bot.p.msg(channel, "Ow, quit it")

    def perspective_print(self, msg):
        print "debug", msg
class Dispatcher(styles.Versioned):
    """The PB realm: maps avatarIDs ('debug', 'statusClient', registered
    names, or buildslave names) to the perspective objects handed to
    connecting clients."""
    # twcompat shim: zope.interface when available, old-style otherwise
    if implements:
        implements(portal.IRealm)
    else:
        __implements__ = portal.IRealm,
    persistenceVersion = 2

    def __init__(self):
        self.names = {}

    def upgradeToVersion1(self):
        # older pickles lacked .master; derive it from the botmaster
        self.master = self.botmaster.parent
    def upgradeToVersion2(self):
        self.names = {}

    def register(self, name, afactory):
        self.names[name] = afactory
    def unregister(self, name):
        del self.names[name]

    def requestAvatar(self, avatarID, mind, interface):
        assert interface == pb.IPerspective
        afactory = self.names.get(avatarID)
        if afactory:
            p = afactory.getPerspective()
        elif avatarID == "debug":
            p = DebugPerspective()
            p.master = self.master
            p.botmaster = self.botmaster
        elif avatarID == "statusClient":
            p = self.statusClientService.getPerspective()
        else:
            # it must be one of the buildslaves: no other names will make it
            # past the checker
            p = self.botmaster.getPerspective(avatarID)

        if not p:
            raise ValueError("no perspective for '%s'" % avatarID)

        d = defer.maybeDeferred(p.attached, mind)
        d.addCallback(self._avatarAttached, mind)
        return d

    def _avatarAttached(self, p, mind):
        # third element is the 'logout' callable PB invokes when the client
        # disconnects; bind p/mind as defaults so each avatar gets its own
        return (pb.IPerspective, p, lambda p=p,mind=mind: p.detached(mind))
480 ########################################
482 # service hierarchy:
483 # BuildMaster
484 # BotMaster
485 # ChangeMaster
486 # all IChangeSource objects
487 # StatusClientService
488 # TCPClient(self.ircFactory)
489 # TCPServer(self.slaveFactory) -> dispatcher.requestAvatar
490 # TCPServer(self.site)
491 # UNIXServer(ResourcePublisher(self.site))
class BuildMaster(service.MultiService, styles.Versioned):
    """The top-level service: owns the BotMaster, the ChangeMaster, the
    status targets, and the PB listening port, and (re)loads master.cfg."""
    debug = 0
    persistenceVersion = 3
    manhole = None
    debugPassword = None
    projectName = "(unspecified)"
    projectURL = None
    buildbotURL = None
    change_svc = None

    def __init__(self, basedir, configFileName="master.cfg"):
        service.MultiService.__init__(self)
        self.setName("buildmaster")
        self.basedir = basedir
        self.configFileName = configFileName

        # the dispatcher is the realm in which all inbound connections are
        # looked up: slave builders, change notifications, status clients, and
        # the debug port
        dispatcher = Dispatcher()
        dispatcher.master = self
        self.dispatcher = dispatcher
        self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        # the checker starts with no user/passwd pairs: they are added later
        p = portal.Portal(dispatcher)
        p.registerChecker(self.checker)
        self.slaveFactory = pb.PBServerFactory(p)
        self.slaveFactory.unsafeTracebacks = True # let them see exceptions

        self.slavePortnum = None
        self.slavePort = None

        self.botmaster = BotMaster()
        self.botmaster.setName("botmaster")
        self.botmaster.setServiceParent(self)
        dispatcher.botmaster = self.botmaster

        self.status = Status(self.botmaster, self.basedir)

        self.statusTargets = []

        self.bots = []
        # this ChangeMaster is a dummy, only used by tests. In the real
        # buildmaster, where the BuildMaster instance is activated
        # (startService is called) by twistd, this attribute is overwritten.
        self.useChanges(ChangeMaster())

        self.readConfig = False

    def upgradeToVersion1(self):
        self.dispatcher = self.slaveFactory.root.portal.realm

    def upgradeToVersion2(self): # post-0.4.3
        self.webServer = self.webTCPPort
        del self.webTCPPort
        self.webDistribServer = self.webUNIXPort
        del self.webUNIXPort
        self.configFileName = "master.cfg"

    def upgradeToVersion3(self):
        # post 0.6.3, solely to deal with the 0.6.3 breakage. Starting with
        # 0.6.5 I intend to do away with .tap files altogether
        self.services = []
        self.namedServices = {}
        del self.change_svc

    def startService(self):
        service.MultiService.startService(self)
        self.loadChanges() # must be done before loading the config file
        if not self.readConfig:
            # TODO: consider catching exceptions during this call to
            # loadTheConfigFile and bailing (reactor.stop) if it fails,
            # since without a config file we can't do anything except reload
            # the config file, and it would be nice for the user to discover
            # this quickly.
            self.loadTheConfigFile()
        # reload the config on SIGHUP (where the platform supports it)
        if signal and hasattr(signal, "SIGHUP"):
            signal.signal(signal.SIGHUP, self._handleSIGHUP)
        for b in self.botmaster.builders.values():
            b.builder_status.addPointEvent(["master", "started"])
            b.builder_status.saveYourself()

    def useChanges(self, changes):
        # install a (possibly new) ChangeMaster service, replacing any
        # previous one
        if self.change_svc:
            # TODO: can return a Deferred
            self.change_svc.disownServiceParent()
        self.change_svc = changes
        self.change_svc.basedir = self.basedir
        self.change_svc.setName("changemaster")
        self.dispatcher.changemaster = self.change_svc
        self.change_svc.setServiceParent(self)

    def loadChanges(self):
        # restore the ChangeMaster from basedir/changes.pck; fall back to a
        # fresh one if the pickle is missing or truncated
        filename = os.path.join(self.basedir, "changes.pck")
        try:
            changes = pickle.load(open(filename, "rb"))
            styles.doUpgrade()
        except IOError:
            log.msg("changes.pck missing, using new one")
            changes = ChangeMaster()
        except EOFError:
            log.msg("corrupted changes.pck, using new one")
            changes = ChangeMaster()
        self.useChanges(changes)

    def _handleSIGHUP(self, *args):
        # signal handlers must not do real work: schedule the reload on the
        # reactor instead
        reactor.callLater(0, self.loadTheConfigFile)

    def getStatus(self):
        """
        @rtype: L{buildbot.status.builder.Status}
        """
        return self.status

    def loadTheConfigFile(self, configFile=None):
        # open the config file (default: basedir/self.configFileName) and
        # hand it to loadConfig; errors leave the old configuration running
        if not configFile:
            configFile = os.path.join(self.basedir, self.configFileName)

        log.msg("loading configuration from %s" % configFile)
        configFile = os.path.expanduser(configFile)

        try:
            f = open(configFile, "r")
        except IOError, e:
            log.msg("unable to open config file '%s'" % configFile)
            log.msg("leaving old configuration in place")
            log.err(e)
            return

        try:
            self.loadConfig(f)
        except:
            log.msg("error during loadConfig")
            log.err()
        f.close()

    def loadConfig(self, f):
        """Internal function to load a specific configuration file. Any
        errors in the file will be signalled by raising an exception.

        @return: a Deferred that will fire (with None) when the configuration
        changes have been completed. This may involve a round-trip to each
        buildslave that was involved."""

        # NOTE: the config file is executed as Python; it is trusted input
        # by design (the admin writes it).
        localDict = {'basedir': os.path.expanduser(self.basedir)}
        try:
            exec f in localDict
        except:
            log.msg("error while parsing config file")
            raise

        try:
            config = localDict['BuildmasterConfig']
        except KeyError:
            log.err("missing config dictionary")
            log.err("config file must define BuildmasterConfig")
            raise

        known_keys = "bots sources schedulers builders slavePortnum " + \
                     "debugPassword manhole " + \
                     "status projectName projectURL buildbotURL"
        known_keys = known_keys.split()
        for k in config.keys():
            if k not in known_keys:
                log.msg("unknown key '%s' defined in config dictionary" % k)

        try:
            # required
            bots = config['bots']
            sources = config['sources']
            schedulers = config['schedulers']
            builders = config['builders']
            slavePortnum = config['slavePortnum']

            # optional
            debugPassword = config.get('debugPassword')
            manhole = config.get('manhole')
            status = config.get('status', [])
            projectName = config.get('projectName')
            projectURL = config.get('projectURL')
            buildbotURL = config.get('buildbotURL')

        except KeyError, e:
            log.msg("config dictionary is missing a required parameter")
            log.msg("leaving old configuration in place")
            raise

        # do some validation first
        for name, passwd in bots:
            if name in ("debug", "change", "status"):
                raise KeyError, "reserved name '%s' used for a bot" % name
        if config.has_key('interlocks'):
            raise KeyError("c['interlocks'] is no longer accepted")

        assert isinstance(sources, (list, tuple))
        for s in sources:
            assert interfaces.IChangeSource(s, None)
        # this assertion catches c['schedulers'] = Scheduler(), since
        # Schedulers are service.MultiServices and thus iterable.
        errmsg = "c['schedulers'] must be a list of Scheduler instances"
        assert isinstance(schedulers, (list, tuple)), errmsg
        for s in schedulers:
            assert interfaces.IScheduler(s, None), errmsg
        assert isinstance(status, (list, tuple))
        for s in status:
            assert interfaces.IStatusReceiver(s, None)

        # builder dicts must reference known slaves, and builder names and
        # builddirs must be unique
        slavenames = [name for name,pw in bots]
        buildernames = []
        dirnames = []
        for b in builders:
            if type(b) is tuple:
                raise ValueError("builder %s must be defined with a dict, "
                                 "not a tuple" % b[0])
            if b.has_key('slavename') and b['slavename'] not in slavenames:
                raise ValueError("builder %s uses undefined slave %s" \
                                 % (b['name'], b['slavename']))
            for n in b.get('slavenames', []):
                if n not in slavenames:
                    raise ValueError("builder %s uses undefined slave %s" \
                                     % (b['name'], n))
            if b['name'] in buildernames:
                raise ValueError("duplicate builder name %s"
                                 % b['name'])
            buildernames.append(b['name'])
            if b['builddir'] in dirnames:
                raise ValueError("builder %s reuses builddir %s"
                                 % (b['name'], b['builddir']))
            dirnames.append(b['builddir'])

        schedulernames = []
        for s in schedulers:
            for b in s.listBuilderNames():
                assert b in buildernames, \
                       "%s uses unknown builder %s" % (s, b)
            if s.name in schedulernames:
                # TODO: schedulers share a namespace with other Service
                # children of the BuildMaster node, like status plugins, the
                # Manhole, the ChangeMaster, and the BotMaster (although most
                # of these don't have names)
                msg = ("Schedulers must have unique names, but "
                       "'%s' was a duplicate" % (s.name,))
                raise ValueError(msg)
            schedulernames.append(s.name)

        # assert that all locks used by the Builds and their Steps are
        # uniquely named.
        locks = {}
        for b in builders:
            for l in b.get('locks', []):
                if locks.has_key(l.name):
                    if locks[l.name] is not l:
                        raise ValueError("Two different locks (%s and %s) "
                                         "share the name %s"
                                         % (l, locks[l.name], l.name))
                else:
                    locks[l.name] = l
            # TODO: this will break with any BuildFactory that doesn't use a
            # .steps list, but I think the verification step is more
            # important.
            for s in b['factory'].steps:
                for l in s[1].get('locks', []):
                    if locks.has_key(l.name):
                        if locks[l.name] is not l:
                            raise ValueError("Two different locks (%s and %s)"
                                             " share the name %s"
                                             % (l, locks[l.name], l.name))
                    else:
                        locks[l.name] = l

        # slavePortnum supposed to be a strports specification
        if type(slavePortnum) is int:
            slavePortnum = "tcp:%d" % slavePortnum

        # now we're committed to implementing the new configuration, so do
        # it atomically
        # TODO: actually, this is spread across a couple of Deferreds, so it
        # really isn't atomic.

        d = defer.succeed(None)

        self.projectName = projectName
        self.projectURL = projectURL
        self.buildbotURL = buildbotURL

        # self.bots: Disconnect any that were attached and removed from the
        # list. Update self.checker with the new list of passwords,
        # including debug/change/status.
        d.addCallback(lambda res: self.loadConfig_Slaves(bots))

        # self.debugPassword
        if debugPassword:
            self.checker.addUser("debug", debugPassword)
            self.debugPassword = debugPassword

        # self.manhole
        if manhole != self.manhole:
            # changing
            if self.manhole:
                # disownServiceParent may return a Deferred
                d.addCallback(lambda res: self.manhole.disownServiceParent())
                def _remove(res):
                    self.manhole = None
                    return res
                d.addCallback(_remove)
            if manhole:
                def _add(res):
                    self.manhole = manhole
                    manhole.setServiceParent(self)
                d.addCallback(_add)

        # add/remove self.botmaster.builders to match builders. The
        # botmaster will handle startup/shutdown issues.
        d.addCallback(lambda res: self.loadConfig_Builders(builders))

        d.addCallback(lambda res: self.loadConfig_status(status))

        # Schedulers are added after Builders in case they start right away
        d.addCallback(lambda res: self.loadConfig_Schedulers(schedulers))
        # and Sources go after Schedulers for the same reason
        d.addCallback(lambda res: self.loadConfig_Sources(sources))

        # self.slavePort
        if self.slavePortnum != slavePortnum:
            if self.slavePort:
                def closeSlavePort(res):
                    d1 = self.slavePort.disownServiceParent()
                    self.slavePort = None
                    return d1
                d.addCallback(closeSlavePort)
            if slavePortnum is not None:
                def openSlavePort(res):
                    self.slavePort = strports.service(slavePortnum,
                                                      self.slaveFactory)
                    self.slavePort.setServiceParent(self)
                d.addCallback(openSlavePort)
                log.msg("BuildMaster listening on port %s" % slavePortnum)
            self.slavePortnum = slavePortnum

        log.msg("configuration update started")
        def _done(res):
            self.readConfig = True
            log.msg("configuration update complete")
        d.addCallback(_done)
        d.addCallback(lambda res: self.botmaster.maybeStartAllBuilds())
        return d

    def loadConfig_Slaves(self, bots):
        # set up the Checker with the names and passwords of all valid bots
        self.checker.users = {} # violates abstraction, oh well
        for user, passwd in bots:
            self.checker.addUser(user, passwd)
        self.checker.addUser("change", "changepw")

        # identify new/old bots
        old = self.bots; oldnames = [name for name,pw in old]
        new = bots; newnames = [name for name,pw in new]
        # removeSlave will hang up on the old bot
        dl = [self.botmaster.removeSlave(name)
              for name in oldnames if name not in newnames]
        [self.botmaster.addSlave(name)
         for name in newnames if name not in oldnames]

        # all done
        self.bots = bots
        return defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)

    def loadConfig_Sources(self, sources):
        log.msg("loadConfig_Sources, change_svc is", self.change_svc,
                self.change_svc.parent)
        # shut down any that were removed, start any that were added
        deleted_sources = [s for s in self.change_svc if s not in sources]
        added_sources = [s for s in sources if s not in self.change_svc]
        dl = [self.change_svc.removeSource(s) for s in deleted_sources]
        def addNewOnes(res):
            [self.change_svc.addSource(s) for s in added_sources]
        d = defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
        d.addCallback(addNewOnes)
        return d

    def allSchedulers(self):
        # TODO: when twisted-1.3 compatibility is dropped, switch to the
        # providedBy form, because it's faster (no actual adapter lookup)
        return [child for child in self
                #if interfaces.IScheduler.providedBy(child)]
                if interfaces.IScheduler(child, None)]

    def loadConfig_Schedulers(self, newschedulers):
        # remove schedulers no longer in the config, then (after their
        # removal completes) add the new ones
        oldschedulers = self.allSchedulers()
        removed = [s for s in oldschedulers if s not in newschedulers]
        added = [s for s in newschedulers if s not in oldschedulers]
        dl = [defer.maybeDeferred(s.disownServiceParent) for s in removed]
        def addNewOnes(res):
            for s in added:
                s.setServiceParent(self)
        d = defer.DeferredList(dl, fireOnOneErrback=1)
        d.addCallback(addNewOnes)
        return d

    def loadConfig_Builders(self, newBuilderData):
        # reconcile the running Builders against the new config dicts:
        # remove vanished ones, create new ones, and migrate changed ones
        somethingChanged = False
        newList = {}
        newBuilderNames = []
        allBuilders = self.botmaster.builders.copy()
        for data in newBuilderData:
            name = data['name']
            newList[name] = data
            newBuilderNames.append(name)

        # identify all that were removed
        for oldname in self.botmaster.getBuildernames():
            if oldname not in newList:
                log.msg("removing old builder %s" % oldname)
                del allBuilders[oldname]
                somethingChanged = True
                # announce the change
                self.status.builderRemoved(oldname)

        # everything in newList is either unchanged, changed, or new
        for name, data in newList.items():
            old = self.botmaster.builders.get(name)
            basedir = data['builddir'] # used on both master and slave
            #name, slave, builddir, factory = data
            if not old: # new
                # category added after 0.6.2
                category = data.get('category', None)
                log.msg("adding new builder %s for category %s" %
                        (name, category))
                statusbag = self.status.builderAdded(name, basedir, category)
                builder = Builder(data, statusbag)
                allBuilders[name] = builder
                somethingChanged = True
            elif old.compareToSetup(data):
                # changed: try to minimize the disruption and only modify the
                # pieces that really changed
                diffs = old.compareToSetup(data)
                log.msg("updating builder %s: %s" % (name, "\n".join(diffs)))

                statusbag = old.builder_status
                statusbag.saveYourself() # seems like a good idea
                # TODO: if the basedir was changed, we probably need to make
                # a new statusbag
                new_builder = Builder(data, statusbag)
                new_builder.consumeTheSoulOfYourPredecessor(old)
                # that migrates any retained slavebuilders too

                # point out that the builder was updated. On the Waterfall,
                # this will appear just after any currently-running builds.
                statusbag.addPointEvent(["config", "updated"])

                allBuilders[name] = new_builder
                somethingChanged = True
            else:
                # unchanged: leave it alone
                log.msg("builder %s is unchanged" % name)
                pass

        if somethingChanged:
            sortedAllBuilders = [allBuilders[name] for name in newBuilderNames]
            d = self.botmaster.setBuilders(sortedAllBuilders)
            return d
        return None

    def loadConfig_status(self, status):
        dl = []

        # remove old ones
        for s in self.statusTargets[:]:
            if not s in status:
                log.msg("removing IStatusReceiver", s)
                d = defer.maybeDeferred(s.disownServiceParent)
                dl.append(d)
                self.statusTargets.remove(s)
        # after those are finished going away, add new ones
        def addNewOnes(res):
            for s in status:
                if not s in self.statusTargets:
                    log.msg("adding IStatusReceiver", s)
                    s.setServiceParent(self)
                    self.statusTargets.append(s)
        d = defer.DeferredList(dl, fireOnOneErrback=1)
        d.addCallback(addNewOnes)
        return d

    def addChange(self, change):
        # fan the Change out to every Scheduler
        for s in self.allSchedulers():
            s.addChange(change)

    def submitBuildSet(self, bs):
        # determine the set of Builders to use
        builders = []
        for name in bs.builderNames:
            b = self.botmaster.builders.get(name)
            if b:
                if b not in builders:
                    builders.append(b)
                continue
            # TODO: add aliases like 'all'
            raise KeyError("no such builder named '%s'" % name)

        # now tell the BuildSet to create BuildRequests for all those
        # Builders and submit them
        bs.start(builders)
        self.status.buildsetSubmitted(bs.status)
class Control:
    """Implements IControl for a BuildMaster: a thin facade that forwards
    change submission, buildset submission, and builder lookup to the
    master it wraps."""
    # twcompat shim: zope.interface when available, old-style otherwise
    if implements:
        implements(interfaces.IControl)
    else:
        __implements__ = interfaces.IControl,

    def __init__(self, master):
        self.master = master

    def addChange(self, change):
        # hand the Change straight to the master's ChangeMaster service
        changemaster = self.master.change_svc
        changemaster.addChange(change)

    def submitBuildSet(self, bs):
        # the master knows how to map builderNames to Builders
        self.master.submitBuildSet(bs)

    def getBuilder(self, name):
        # look up the Builder (KeyError for unknown names) and adapt it to
        # the control interface
        botmaster = self.master.botmaster
        target = botmaster.builders[name]
        return interfaces.IBuilderControl(target)
# make interfaces.IControl(buildmaster) return a Control wrapper
components.registerAdapter(Control, BuildMaster, interfaces.IControl)

# so anybody who can get a handle on the BuildMaster can cause a build with:
#  IControl(master).getBuilder("full-2.3").requestBuild(buildrequest)