# -*- test-case-name: buildbot.test.test_locks -*-

import random

from twisted.trial import unittest
from twisted.internet import defer, reactor

from buildbot import master
from buildbot.steps import dummy
from buildbot.sourcestamp import SourceStamp
from buildbot.process.base import BuildRequest
from buildbot.test.runutils import RunMixin
from buildbot import locks

def claimHarder(lock, owner):
    """Return a Deferred that will fire when the lock is claimed. Keep trying
    until we claim it."""
    if lock.isAvailable():
        #print "claimHarder(%s): claiming" % owner
        lock.claim(owner)
        return defer.succeed(lock)
    #print "claimHarder(%s): waiting" % owner
    d = lock.waitUntilMaybeAvailable(owner)
    d.addCallback(claimHarder, owner)
    return d

def hold(lock, owner, mode="now"):
    """Release the lock, either immediately or after a short delay."""
    if mode == "now":
        lock.release(owner)
    elif mode == "very soon":
        reactor.callLater(0, lock.release, owner)
    elif mode == "soon":
        reactor.callLater(0.1, lock.release, owner)


class Unit(unittest.TestCase):
    def testNow(self):
        l = locks.BaseLock("name")
        self.failUnless(l.isAvailable())
        l.claim("owner1")
        self.failIf(l.isAvailable())
        l.release("owner1")
        self.failUnless(l.isAvailable())

    def testLater(self):
        lock = locks.BaseLock("name")
        d = claimHarder(lock, "owner1")
        d.addCallback(lambda lock: lock.release("owner1"))
        return d

    def testCompetition(self):
        lock = locks.BaseLock("name")
        d = claimHarder(lock, "owner1")
        d.addCallback(self._claim1)
        return d

    def _claim1(self, lock):
        # we should have claimed it by now
        self.failIf(lock.isAvailable())
        # now set up two competing owners. We don't know which will get the
        # lock first.
        d2 = claimHarder(lock, "owner2")
        d2.addCallback(hold, "owner2", "now")
        d3 = claimHarder(lock, "owner3")
        d3.addCallback(hold, "owner3", "soon")
        dl = defer.DeferredList([d2, d3])
        dl.addCallback(self._cleanup, lock)
        # and release the lock in a moment
        reactor.callLater(0.1, lock.release, "owner1")
        return dl

    def _cleanup(self, res, lock):
        d = claimHarder(lock, "cleanup")
        d.addCallback(lambda lock: lock.release("cleanup"))
        return d

    def testRandom(self):
        lock = locks.BaseLock("name")
        dl = []
        for i in range(100):
            owner = "owner%d" % i
            mode = random.choice(["now", "very soon", "soon"])
            d = claimHarder(lock, owner)
            d.addCallback(hold, owner, mode)
            dl.append(d)
        d = defer.DeferredList(dl)
        d.addCallback(self._cleanup, lock)
        return d


class Multi(unittest.TestCase):
    def testNow(self):
        lock = locks.BaseLock("name", 2)
        self.failUnless(lock.isAvailable())
        lock.claim("owner1")
        self.failUnless(lock.isAvailable())
        lock.claim("owner2")
        self.failIf(lock.isAvailable())
        lock.release("owner1")
        self.failUnless(lock.isAvailable())
        lock.release("owner2")
        self.failUnless(lock.isAvailable())

    def testLater(self):
        lock = locks.BaseLock("name", 2)
        lock.claim("owner1")
        lock.claim("owner2")
        d = claimHarder(lock, "owner3")
        d.addCallback(lambda lock: lock.release("owner3"))
        lock.release("owner2")
        lock.release("owner1")
        return d

    def _cleanup(self, res, lock, count):
        dl = []
        for i in range(count):
            d = claimHarder(lock, "cleanup%d" % i)
            dl.append(d)
        d2 = defer.DeferredList(dl)
        # once all locks are claimed, we know that any previous owners have
        # released theirs
        def _release(res):
            for i in range(count):
                lock.release("cleanup%d" % i)
        d2.addCallback(_release)
        return d2

    def testRandom(self):
        COUNT = 5
        lock = locks.BaseLock("name", COUNT)
        dl = []
        for i in range(100):
            owner = "owner%d" % i
            mode = random.choice(["now", "very soon", "soon"])
            d = claimHarder(lock, owner)
            def _check(lock):
                self.failIf(len(lock.owners) > COUNT)
                return lock
            d.addCallback(_check)
            d.addCallback(hold, owner, mode)
            dl.append(d)
        d = defer.DeferredList(dl)
        d.addCallback(self._cleanup, lock, COUNT)
        return d


class Dummy:
    pass

def slave(slavename):
    slavebuilder = Dummy()
    slavebuilder.slave = Dummy()
    slavebuilder.slave.slavename = slavename
    return slavebuilder


class MakeRealLock(unittest.TestCase):

    def make(self, lockid):
        return lockid.lockClass(lockid)

    def testMaster(self):
        mid1 = locks.MasterLock("name1")
        mid2 = locks.MasterLock("name1")
        mid3 = locks.MasterLock("name3")
        mid4 = locks.MasterLock("name1", 3)
        self.failUnlessEqual(mid1, mid2)
        self.failIfEqual(mid1, mid3)
        # they should all be hashable
        d = {mid1: 1, mid2: 2, mid3: 3, mid4: 4}

        l1 = self.make(mid1)
        self.failUnlessEqual(l1.name, "name1")
        self.failUnlessEqual(l1.maxCount, 1)
        self.failUnlessIdentical(l1.getLock(slave("slave1")), l1)

        l4 = self.make(mid4)
        self.failUnlessEqual(l4.name, "name1")
        self.failUnlessEqual(l4.maxCount, 3)
        self.failUnlessIdentical(l4.getLock(slave("slave1")), l4)

    def testSlave(self):
        sid1 = locks.SlaveLock("name1")
        sid2 = locks.SlaveLock("name1")
        sid3 = locks.SlaveLock("name3")
        sid4 = locks.SlaveLock("name1", maxCount=3)
        mcfs = {"bigslave": 4, "smallslave": 1}
        sid5 = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs)
        mcfs2 = {"bigslave": 4, "smallslave": 1}
        sid5a = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs2)
        mcfs3 = {"bigslave": 1, "smallslave": 99}
        sid5b = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs3)
        self.failUnlessEqual(sid1, sid2)
        self.failIfEqual(sid1, sid3)
        self.failIfEqual(sid1, sid4)
        self.failIfEqual(sid1, sid5)
        self.failUnlessEqual(sid5, sid5a)
        self.failIfEqual(sid5a, sid5b)
        # they should all be hashable
        d = {sid1: 1, sid2: 2, sid3: 3, sid4: 4, sid5: 5, sid5a: 6, sid5b: 7}

        l1 = self.make(sid1)
        self.failUnlessEqual(l1.name, "name1")
        self.failUnlessEqual(l1.maxCount, 1)
        l1s1 = l1.getLock(slave("slave1"))
        self.failIfIdentical(l1s1, l1)

        l4 = self.make(sid4)
        self.failUnlessEqual(l4.maxCount, 3)
        l4s1 = l4.getLock(slave("slave1"))
        self.failUnlessEqual(l4s1.maxCount, 3)

        l5 = self.make(sid5)
        l5s1 = l5.getLock(slave("bigslave"))
        l5s2 = l5.getLock(slave("smallslave"))
        l5s3 = l5.getLock(slave("unnamedslave"))
        self.failUnlessEqual(l5s1.maxCount, 4)
        self.failUnlessEqual(l5s2.maxCount, 1)
        self.failUnlessEqual(l5s3.maxCount, 3)


class GetLock(unittest.TestCase):
    def testGet(self):
        # the master.cfg file contains "lock ids", which are instances of
        # MasterLock and SlaveLock but which are not actually Locks per se.
        # When the build starts, these markers are turned into RealMasterLock
        # and RealSlaveLock instances. This ensures that any builds running
        # on slaves that were unaffected by the config change are still
        # referring to the same Lock instance as new builds by builders that
        # *were* affected by the change. There have been bugs in the past in
        # which this didn't happen, and the Locks were bypassed because half
        # the builders were using one incarnation of the lock while the other
        # half were using a separate (but equal) incarnation.
        #
        # Changing the lock id in any way should cause it to be replaced in
        # the BotMaster. This will result in a couple of funky artifacts:
        # builds in progress might pay attention to a different lock, so we
        # might bypass the locking for the duration of a couple builds.
        # There's also the problem of old Locks lingering around in
        # BotMaster.locks, but they're small and shouldn't really cause a
        # problem.

        b = master.BotMaster()
        l1 = locks.MasterLock("one")
        l1a = locks.MasterLock("one")
        l2 = locks.MasterLock("one", maxCount=4)

        rl1 = b.getLockByID(l1)
        rl2 = b.getLockByID(l1a)
        self.failUnlessIdentical(rl1, rl2)
        rl3 = b.getLockByID(l2)
        self.failIfIdentical(rl1, rl3)

        s1 = locks.SlaveLock("one")
        s1a = locks.SlaveLock("one")
        s2 = locks.SlaveLock("one", maxCount=4)
        s3 = locks.SlaveLock("one", maxCount=4,
                             maxCountForSlave={"a": 1, "b": 2})
        s3a = locks.SlaveLock("one", maxCount=4,
                              maxCountForSlave={"a": 1, "b": 2})
        s4 = locks.SlaveLock("one", maxCount=4,
                             maxCountForSlave={"a": 4, "b": 4})

        rl1 = b.getLockByID(s1)
        rl2 = b.getLockByID(s1a)
        self.failUnlessIdentical(rl1, rl2)
        rl3 = b.getLockByID(s2)
        self.failIfIdentical(rl1, rl3)
        rl4 = b.getLockByID(s3)
        self.failIfIdentical(rl1, rl4)
        self.failIfIdentical(rl3, rl4)
        rl5 = b.getLockByID(s3a)
        self.failUnlessIdentical(rl4, rl5)
        rl6 = b.getLockByID(s4)
        self.failIfIdentical(rl5, rl6)
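
# The classes above exercise the lock objects directly. The remaining tests
# are functional: they load a real master configuration (config_1 below),
# connect two slaves, and then check the order of LockStep "start"/"done"
# events to make sure the locks actually serialize the builds as intended.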


class LockStep(dummy.Dummy):
    def start(self):
        number = self.build.requests[0].number
        self.build.requests[0].events.append(("start", number))
        dummy.Dummy.start(self)
    def done(self):
        number = self.build.requests[0].number
        self.build.requests[0].events.append(("done", number))
        dummy.Dummy.done(self)

config_1 = """
from buildbot import locks
from buildbot.process import factory
from buildbot.buildslave import BuildSlave
s = factory.s
from buildbot.test.test_locks import LockStep

BuildmasterConfig = c = {}
c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit')]
c['schedulers'] = []
c['slavePortnum'] = 0

first_lock = locks.SlaveLock('first')
second_lock = locks.MasterLock('second')
f1 = factory.BuildFactory([s(LockStep, timeout=2, locks=[first_lock])])
f2 = factory.BuildFactory([s(LockStep, timeout=3, locks=[second_lock])])
f3 = factory.BuildFactory([s(LockStep, timeout=2, locks=[])])

b1a = {'name': 'full1a', 'slavename': 'bot1', 'builddir': '1a', 'factory': f1}
b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1b', 'factory': f1}
b1c = {'name': 'full1c', 'slavename': 'bot1', 'builddir': '1c', 'factory': f3,
       'locks': [first_lock, second_lock]}
b1d = {'name': 'full1d', 'slavename': 'bot1', 'builddir': '1d', 'factory': f2}
b2a = {'name': 'full2a', 'slavename': 'bot2', 'builddir': '2a', 'factory': f1}
b2b = {'name': 'full2b', 'slavename': 'bot2', 'builddir': '2b', 'factory': f3,
       'locks': [second_lock]}
c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
"""

config_1a = config_1 + \
"""
b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1B', 'factory': f1}
c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
"""
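
# Note the scoping in config_1: first_lock is a SlaveLock, so each slave gets
# its own counter and builds on different slaves do not contend for it, while
# second_lock is a MasterLock shared across the whole buildmaster. testLock2
# and testLock3 below rely on exactly this distinction.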


class Locks(RunMixin, unittest.TestCase):
    def setUp(self):
        RunMixin.setUp(self)
        self.req1 = req1 = BuildRequest("forced build", SourceStamp())
        req1.number = 1
        self.req2 = req2 = BuildRequest("forced build", SourceStamp())
        req2.number = 2
        self.req3 = req3 = BuildRequest("forced build", SourceStamp())
        req3.number = 3
        req1.events = req2.events = req3.events = self.events = []
        d = self.master.loadConfig(config_1)
        d.addCallback(lambda res: self.master.startService())
        d.addCallback(lambda res: self.connectSlaves(["bot1", "bot2"],
                                                     ["full1a", "full1b",
                                                      "full1c", "full1d",
                                                      "full2a", "full2b"]))
        return d

    def testLock1(self):
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full1b").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock1_1)
        return d

    def _testLock1_1(self, res):
        # full1a should complete its step before full1b starts it
        self.failUnlessEqual(self.events,
                             [("start", 1), ("done", 1),
                              ("start", 2), ("done", 2)])

    def testLock1a(self):
        # just like testLock1, but we reload the config file first, with a
        # change that causes full1b to be changed. This tickles a design bug
        # in which full1a and full1b wind up with distinct Lock instances.
        d = self.master.loadConfig(config_1a)
        d.addCallback(self._testLock1a_1)
        return d

    def _testLock1a_1(self, res):
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full1b").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock1a_2)
        return d

    def _testLock1a_2(self, res):
        # full1a should complete its step before full1b starts it
        self.failUnlessEqual(self.events,
                             [("start", 1), ("done", 1),
                              ("start", 2), ("done", 2)])

    def testLock2(self):
        # two builds run on separate slaves with slave-scoped locks should
        # not interfere
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full2a").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock2_1)
        return d

    def _testLock2_1(self, res):
        # full2a should start its step before full1a finishes it. They run on
        # different slaves, however, so they might start in either order.
        self.failUnless(self.events[:2] == [("start", 1), ("start", 2)] or
                        self.events[:2] == [("start", 2), ("start", 1)])

    def testLock3(self):
        # two builds run on separate slaves with master-scoped locks should
        # not overlap
        self.control.getBuilder("full1c").requestBuild(self.req1)
        self.control.getBuilder("full2b").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock3_1)
        return d

    def _testLock3_1(self, res):
        # full2b should not start until after full1c finishes. The builds run
        # on different slaves, so we can't really predict which will start
        # first. The important thing is that they don't overlap.
        self.failUnless(self.events == [("start", 1), ("done", 1),
                                        ("start", 2), ("done", 2)]
                        or self.events == [("start", 2), ("done", 2),
                                           ("start", 1), ("done", 1)])

    def testLock4(self):
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full1c").requestBuild(self.req2)
        self.control.getBuilder("full1d").requestBuild(self.req3)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished(),
                                self.req3.waitUntilFinished()])
        d.addCallback(self._testLock4_1)
        return d

    def _testLock4_1(self, res):
        # full1a starts, then full1d starts (because they do not interfere).
        # Once both are done, full1c can run.
        self.failUnlessEqual(self.events,
                             [("start", 1), ("start", 3),
                              ("done", 1), ("done", 3),
                              ("start", 2), ("done", 2)])