1 # -*- test-case-name: buildbot.test.test_slaves -*-
3 from twisted
.trial
import unittest
4 from buildbot
.twcompat
import maybeWait
5 from twisted
.internet
import defer
, reactor
6 from twisted
.python
import log
8 from buildbot
.test
.runutils
import RunMixin
9 from buildbot
.sourcestamp
import SourceStamp
10 from buildbot
.process
.base
import BuildRequest
11 from buildbot
.status
.builder
import SUCCESS
12 from buildbot
.slave
import bot
15 from buildbot.process import factory
16 from buildbot.steps import dummy
19 BuildmasterConfig = c = {}
20 c['bots'] = [('bot1', 'sekrit'), ('bot2', 'sekrit'), ('bot3', 'sekrit')]
26 f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
27 f2 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=2)])
30 {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
31 'builddir': 'b1', 'factory': f1},
35 config_2
= config_1
+ """
38 {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
39 'builddir': 'b1', 'factory': f2},
44 class Slave(RunMixin
, unittest
.TestCase
):
48 self
.master
.loadConfig(config_1
)
49 self
.master
.startService()
50 d
= self
.connectSlave(["b1"])
51 d
.addCallback(lambda res
: self
.connectSlave(["b1"], "bot2"))
54 def doBuild(self
, buildername
):
55 br
= BuildRequest("forced", SourceStamp())
56 d
= br
.waitUntilFinished()
57 self
.control
.getBuilder(buildername
).requestBuild(br
)
60 def testSequence(self
):
61 # make sure both slaves appear in the list.
62 attached_slaves
= [c
for c
in self
.master
.botmaster
.slaves
.values()
64 self
.failUnlessEqual(len(attached_slaves
), 2)
65 b
= self
.master
.botmaster
.builders
["b1"]
66 self
.failUnlessEqual(len(b
.slaves
), 2)
68 # since the current scheduling algorithm is simple and does not
69 # rotate or attempt any sort of load-balancing, two builds in
70 # sequence should both use the first slave. This may change later if
71 # we move to a more sophisticated scheme.
73 d
= self
.doBuild("b1")
74 d
.addCallback(self
._testSequence
_1)
76 def _testSequence_1(self
, res
):
77 self
.failUnlessEqual(res
.getResults(), SUCCESS
)
78 self
.failUnlessEqual(res
.getSlavename(), "bot1")
80 d
= self
.doBuild("b1")
81 d
.addCallback(self
._testSequence
_2)
def _testSequence_2(self, res):
    """Check that the second sequential build also ran on the first slave.

    The current scheduler does no rotation or load-balancing, so
    back-to-back builds are both expected to land on 'bot1'.
    """
    slavename = res.getSlavename()
    self.failUnlessEqual(slavename, "bot1")
87 def testSimultaneous(self
):
88 # make sure we can actually run two builds at the same time
89 d1
= self
.doBuild("b1")
90 d2
= self
.doBuild("b1")
91 d1
.addCallback(self
._testSimultaneous
_1, d2
)
93 def _testSimultaneous_1(self
, res
, d2
):
94 self
.failUnlessEqual(res
.getResults(), SUCCESS
)
95 self
.failUnlessEqual(res
.getSlavename(), "bot1")
96 d2
.addCallback(self
._testSimultaneous
_2)
def _testSimultaneous_2(self, res):
    """The second of two concurrent builds succeeds and uses the other slave."""
    results = res.getResults()
    slavename = res.getSlavename()
    self.failUnlessEqual(results, SUCCESS)
    self.failUnlessEqual(slavename, "bot2")
102 def testFallback1(self
):
103 # detach the first slave, verify that a build is run using the second
105 d
= self
.shutdownSlave("bot1", "b1")
106 d
.addCallback(self
._testFallback
1_1)
108 def _testFallback1_1(self
, res
):
109 attached_slaves
= [c
for c
in self
.master
.botmaster
.slaves
.values()
111 self
.failUnlessEqual(len(attached_slaves
), 1)
112 self
.failUnlessEqual(len(self
.master
.botmaster
.builders
["b1"].slaves
),
114 d
= self
.doBuild("b1")
115 d
.addCallback(self
._testFallback
1_2)
def _testFallback1_2(self, res):
    """With bot1 detached, the build still succeeds on the remaining slave."""
    results = res.getResults()
    slavename = res.getSlavename()
    self.failUnlessEqual(results, SUCCESS)
    self.failUnlessEqual(slavename, "bot2")
121 def testFallback2(self
):
122 # Disable the first slave, so that a slaveping will timeout. Then
123 # start a build, and verify that the non-failing (second) one is
124 # claimed for the build, and that the failing one is removed from the
127 # reduce the ping time so we'll failover faster
128 self
.master
.botmaster
.builders
["b1"].START_BUILD_TIMEOUT
= 1
129 self
.disappearSlave("bot1", "b1")
130 d
= self
.doBuild("b1")
131 d
.addCallback(self
._testFallback
2_1)
def _testFallback2_1(self, res):
    """After bot1's slaveping times out, the build succeeds on bot2.

    Also verifies that the builder's slave list has shrunk to just the
    non-failing slave.
    """
    self.failUnlessEqual(res.getResults(), SUCCESS)
    self.failUnlessEqual(res.getSlavename(), "bot2")
    remaining = self.master.botmaster.builders["b1"].slaves
    self.failUnlessEqual(len(remaining), 1)
    self.failUnlessEqual(remaining[0].slave.slavename, "bot2")
141 def notFinished(self
, brs
):
143 builds
= brs
.getBuilds()
144 self
.failIf(len(builds
) > 1)
146 self
.failIf(builds
[0].isFinished())
148 def testDontClaimPingingSlave(self
):
149 # have two slaves connect for the same builder. Do something to the
150 # first one so that slavepings are delayed (but do not fail
153 self
.slaves
['bot1'].debugOpts
["stallPings"] = (10, timers
)
154 br
= BuildRequest("forced", SourceStamp())
155 d1
= br
.waitUntilFinished()
156 self
.control
.getBuilder("b1").requestBuild(br
)
157 s1
= br
.status
# this is a BuildRequestStatus
158 # give it a chance to start pinging
159 d2
= defer
.Deferred()
160 d2
.addCallback(self
._testDontClaimPingingSlave
_1, d1
, s1
, timers
)
161 reactor
.callLater(1, d2
.callback
, None)
163 def _testDontClaimPingingSlave_1(self
, res
, d1
, s1
, timers
):
164 # now the first build is running (waiting on the ping), so start the
165 # second build. This should claim the second slave, not the first,
166 # because the first is busy doing the ping.
168 d3
= self
.doBuild("b1")
169 d3
.addCallback(self
._testDontClaimPingingSlave
_2, d1
, s1
, timers
)
171 def _testDontClaimPingingSlave_2(self
, res
, d1
, s1
, timers
):
172 self
.failUnlessEqual(res
.getSlavename(), "bot2")
174 # now let the ping complete
175 self
.failUnlessEqual(len(timers
), 1)
177 d1
.addCallback(self
._testDontClaimPingingSlave
_3)
def _testDontClaimPingingSlave_3(self, res):
    """Once its (stalled) ping finally completes, bot1 runs the first build."""
    slavename = res.getSlavename()
    self.failUnlessEqual(slavename, "bot1")
183 from buildbot.process import factory
184 from buildbot.steps import dummy
187 BuildmasterConfig = c = {}
188 c['bots'] = [('bot1', 'sekrit')]
191 c['slavePortnum'] = 0
194 f1 = factory.BuildFactory([s(dummy.Wait, handle='one')])
195 f2 = factory.BuildFactory([s(dummy.Wait, handle='two')])
196 f3 = factory.BuildFactory([s(dummy.Wait, handle='three')])
199 {'name': 'b1', 'slavenames': ['bot1'],
200 'builddir': 'b1', 'factory': f1},
204 config_4
= config_3
+ """
206 {'name': 'b1', 'slavenames': ['bot1'],
207 'builddir': 'b1', 'factory': f2},
211 config_5
= config_3
+ """
213 {'name': 'b1', 'slavenames': ['bot1'],
214 'builddir': 'b1', 'factory': f3},
218 from buildbot
.slave
.commands
import waitCommandRegistry
220 class Reconfig(RunMixin
, unittest
.TestCase
):
224 self
.master
.loadConfig(config_3
)
225 self
.master
.startService()
226 d
= self
.connectSlave(["b1"])
def _one_started(self):
    """Hook fired when build one's 'wait' step begins: record it, fire d1."""
    log.msg("testReconfig._one_started")
    self.build1_started = True
    self.d1.callback(None)
def _two_started(self):
    """Hook fired when build two's 'wait' step begins: record it, fire d3."""
    log.msg("testReconfig._two_started")
    self.build2_started = True
    self.d3.callback(None)
def _three_started(self):
    """Hook fired when build three's 'wait' step begins: record it, fire d5."""
    log.msg("testReconfig._three_started")
    self.build3_started = True
    self.d5.callback(None)
247 def testReconfig(self
):
248 # reconfiguring a Builder should not interrupt any running Builds. No
249 # queued BuildRequests should be lost. The next Build started should
250 # use the new process.
251 slave1
= self
.slaves
['bot1']
252 bot1
= slave1
.getServiceNamed('bot')
253 sb1
= bot1
.builders
['b1']
254 self
.failUnless(isinstance(sb1
, bot
.SlaveBuilder
))
255 self
.failUnless(sb1
.running
)
256 b1
= self
.master
.botmaster
.builders
['b1']
259 self
.d1
= d1
= defer
.Deferred()
260 self
.d2
= d2
= defer
.Deferred()
261 self
.d3
, self
.d4
= defer
.Deferred(), defer
.Deferred()
262 self
.d5
, self
.d6
= defer
.Deferred(), defer
.Deferred()
263 self
.build1_started
= False
264 self
.build2_started
= False
265 self
.build3_started
= False
266 waitCommandRegistry
[("one","build1")] = self
._one
_started
267 waitCommandRegistry
[("two","build2")] = self
._two
_started
268 waitCommandRegistry
[("three","build3")] = self
._three
_started
270 # use different branches to make sure these cannot be merged
271 br1
= BuildRequest("build1", SourceStamp(branch
="1"))
272 b1
.submitBuildRequest(br1
)
273 br2
= BuildRequest("build2", SourceStamp(branch
="2"))
274 b1
.submitBuildRequest(br2
)
275 br3
= BuildRequest("build3", SourceStamp(branch
="3"))
276 b1
.submitBuildRequest(br3
)
277 self
.requests
= (br1
, br2
, br3
)
278 # all three are now in the queue
280 # wait until the first one has started
281 d1
.addCallback(self
._testReconfig
_2)
284 def _testReconfig_2(self
, res
):
285 log
.msg("_testReconfig_2")
286 # confirm that it is building
287 brs
= self
.requests
[0].status
.getBuilds()
288 self
.failUnlessEqual(len(brs
), 1)
290 self
.failUnlessEqual(self
.build1
.getCurrentStep().getName(), "wait")
291 # br1 is building, br2 and br3 are in the queue (in that order). Now
292 # we reconfigure the Builder.
293 self
.failUnless(self
.build1_started
)
294 d
= self
.master
.loadConfig(config_4
)
295 d
.addCallback(self
._testReconfig
_3)
298 def _testReconfig_3(self
, res
):
299 log
.msg("_testReconfig_3")
300 # now check to see that br1 is still building, and that br2 and br3
301 # are in the queue of the new builder
302 b1
= self
.master
.botmaster
.builders
['b1']
303 self
.failIfIdentical(b1
, self
.orig_b1
)
304 self
.failIf(self
.build1
.isFinished())
305 self
.failUnlessEqual(self
.build1
.getCurrentStep().getName(), "wait")
306 self
.failUnlessEqual(len(b1
.buildable
), 2)
307 self
.failUnless(self
.requests
[1] in b1
.buildable
)
308 self
.failUnless(self
.requests
[2] in b1
.buildable
)
310 # allow br1 to finish, and make sure its status is delivered normally
311 d
= self
.requests
[0].waitUntilFinished()
312 d
.addCallback(self
._testReconfig
_4)
313 self
.d2
.callback(None)
316 def _testReconfig_4(self
, bs
):
317 log
.msg("_testReconfig_4")
318 self
.failUnlessEqual(bs
.getReason(), "build1")
319 self
.failUnless(bs
.isFinished())
320 self
.failUnlessEqual(bs
.getResults(), SUCCESS
)
322 # at this point, the first build has finished, and there is a pending
323 # call to start the second build. Once that pending call fires, there
324 # is a network roundtrip before the 'wait' RemoteCommand is delivered
325 # to the slave. We need to wait for both events to happen before we
326 # can check to make sure it is using the correct process. Just wait a
329 d
.addCallback(self
._testReconfig
_5)
330 reactor
.callLater(1, d
.callback
, None)
333 def _testReconfig_5(self
, res
):
334 log
.msg("_testReconfig_5")
335 # at this point the next build ought to be running
336 b1
= self
.master
.botmaster
.builders
['b1']
337 self
.failUnlessEqual(len(b1
.buildable
), 1)
338 self
.failUnless(self
.requests
[2] in b1
.buildable
)
339 self
.failUnlessEqual(len(b1
.building
), 1)
340 # and it ought to be using the new process
341 self
.failUnless(self
.build2_started
)
343 # now, while the second build is running, change the config multiple
346 d
= self
.master
.loadConfig(config_3
)
347 d
.addCallback(lambda res
: self
.master
.loadConfig(config_4
))
348 d
.addCallback(lambda res
: self
.master
.loadConfig(config_5
))
350 # then once that's done, allow the second build to finish and
351 # wait for it to complete
352 da
= self
.requests
[1].waitUntilFinished()
353 self
.d4
.callback(None)
357 # and once *that*'s done, wait another second to let the third
359 db
= defer
.Deferred()
360 reactor
.callLater(1, db
.callback
, None)
362 d
.addCallback(_done2
)
363 d
.addCallback(self
._testReconfig
_6)
def _testReconfig_6(self, res):
    """Final check: the third queued build has started under the new config."""
    log.msg("_testReconfig_6")
    self.failUnless(self.build3_started)
375 class Slave2(RunMixin
, unittest
.TestCase
):
381 self
.master
.loadConfig(config_1
)
382 self
.master
.startService()
384 def doBuild(self
, buildername
, reason
="forced"):
385 # we need to prevent these builds from being merged, so we create
386 # each of them with a different revision specifier. The revision is
387 # ignored because our build process does not have a source checkout
390 br
= BuildRequest(reason
, SourceStamp(revision
=self
.revision
))
391 d
= br
.waitUntilFinished()
392 self
.control
.getBuilder(buildername
).requestBuild(br
)
395 def testFirstComeFirstServed(self
):
396 # submit three builds, then connect a slave which fails the
397 # slaveping. The first build will claim the slave, do the slaveping,
398 # give up, and re-queue the build. Verify that the build gets
399 # re-queued in front of all other builds. This may be tricky, because
400 # the other builds may attempt to claim the just-failed slave.
402 d1
= self
.doBuild("b1", "first")
403 d2
= self
.doBuild("b1", "second")
404 #buildable = self.master.botmaster.builders["b1"].buildable
405 #print [b.reason for b in buildable]
407 # specifically, I want the poor build to get precedence over any
408 # others that were waiting. To test this, we need more builds than
411 # now connect a broken slave. The first build started as soon as it
412 # connects, so by the time we get to our _1 method, the ill-fated
413 # build has already started.
414 d
= self
.connectSlave(["b1"], opts
={"failPingOnce": True})
415 d
.addCallback(self
._testFirstComeFirstServed
_1, d1
, d2
)
417 def _testFirstComeFirstServed_1(self
, res
, d1
, d2
):
418 # the master has send the slaveping. When this is received, it will
419 # fail, causing the master to hang up on the slave. When it
420 # reconnects, it should find the first build at the front of the
421 # queue. If we simply wait for both builds to complete, then look at
422 # the status logs, we should see that the builds ran in the correct
425 d
= defer
.DeferredList([d1
,d2
])
426 d
.addCallback(self
._testFirstComeFirstServed
_2)
def _testFirstComeFirstServed_2(self, res):
    """Both builds are finished: their recorded order must match submission.

    The build that failed its first slave claim must still come out first,
    i.e. re-queued builds keep their place at the front of the queue.
    """
    builder = self.status.getBuilder("b1")
    builds = (builder.getBuild(0), builder.getBuild(1))
    reasons = [b.getReason() for b in builds]
    self.failUnlessEqual(reasons, ["first", "second"])