WebStatus: yes create public_html/ at startup, otherwise we get internal server error...
[buildbot.git] / buildbot / test / test_locks.py
blob812f89667c62e9dc3a235805743dccbe2ac7a90c
1 # -*- test-case-name: buildbot.test.test_locks -*-
3 import random
5 from twisted.trial import unittest
6 from twisted.internet import defer, reactor
8 from buildbot import master
9 from buildbot.steps import dummy
10 from buildbot.sourcestamp import SourceStamp
11 from buildbot.process.base import BuildRequest
12 from buildbot.test.runutils import RunMixin
13 from buildbot import locks
def claimHarder(lock, owner):
    """Return a Deferred that fires (with the lock) once 'owner' has
    successfully claimed it.

    Keeps retrying: another waiter may win the race on any wakeup, so we
    re-check availability each time waitUntilMaybeAvailable() fires.
    """
    if not lock.isAvailable():
        # someone else holds it: wait for a wakeup, then try again
        d = lock.waitUntilMaybeAvailable(owner)
        d.addCallback(claimHarder, owner)
        return d
    lock.claim(owner)
    return defer.succeed(lock)
def hold(lock, owner, mode="now"):
    """Release 'lock' on behalf of 'owner'.

    mode "now" releases synchronously; "very soon" schedules the release
    on the next reactor turn; "soon" schedules it 0.1s out.  Any other
    mode is silently ignored, matching the original behavior.
    """
    if mode == "now":
        lock.release(owner)
        return
    delay = {"very soon": 0, "soon": 0.1}.get(mode)
    if delay is not None:
        reactor.callLater(delay, lock.release, owner)
class Unit(unittest.TestCase):
    """Exercise a single-owner BaseLock directly."""

    def testNow(self):
        # claiming makes the lock unavailable; releasing restores it
        lock = locks.BaseLock("name")
        self.failUnless(lock.isAvailable())
        lock.claim("owner1")
        self.failIf(lock.isAvailable())
        lock.release("owner1")
        self.failUnless(lock.isAvailable())

    def testLater(self):
        lock = locks.BaseLock("name")
        d = claimHarder(lock, "owner1")
        d.addCallback(lambda acquired: acquired.release("owner1"))
        return d

    def testCompetition(self):
        lock = locks.BaseLock("name")
        d = claimHarder(lock, "owner1")
        d.addCallback(self._claim1)
        return d

    def _claim1(self, lock):
        # we should have claimed it by now
        self.failIf(lock.isAvailable())
        # now set up two competing owners. We don't know which will get the
        # lock first.
        d2 = claimHarder(lock, "owner2")
        d2.addCallback(hold, "owner2", "now")
        d3 = claimHarder(lock, "owner3")
        d3.addCallback(hold, "owner3", "soon")
        dl = defer.DeferredList([d2, d3])
        dl.addCallback(self._cleanup, lock)
        # and release the lock in a moment
        reactor.callLater(0.1, lock.release, "owner1")
        return dl

    def _cleanup(self, res, lock):
        # claim and release once more: once we get it, any earlier owner
        # must have been flushed out
        d = claimHarder(lock, "cleanup")
        d.addCallback(lambda acquired: acquired.release("cleanup"))
        return d

    def testRandom(self):
        # 100 owners contend for the lock with randomized release timing
        lock = locks.BaseLock("name")
        pending = []
        for i in range(100):
            owner = "owner%d" % i
            mode = random.choice(["now", "very soon", "soon"])
            d = claimHarder(lock, owner)
            d.addCallback(hold, owner, mode)
            pending.append(d)
        d = defer.DeferredList(pending)
        d.addCallback(self._cleanup, lock)
        return d
class Multi(unittest.TestCase):
    """Exercise a BaseLock that allows multiple simultaneous owners."""

    def testNow(self):
        # with maxCount=2 the lock stays available after one claim and
        # becomes unavailable only after the second
        lock = locks.BaseLock("name", 2)
        self.failUnless(lock.isAvailable())
        lock.claim("owner1")
        self.failUnless(lock.isAvailable())
        lock.claim("owner2")
        self.failIf(lock.isAvailable())
        lock.release("owner1")
        self.failUnless(lock.isAvailable())
        lock.release("owner2")
        self.failUnless(lock.isAvailable())

    def testLater(self):
        lock = locks.BaseLock("name", 2)
        lock.claim("owner1")
        lock.claim("owner2")
        d = claimHarder(lock, "owner3")
        d.addCallback(lambda acquired: acquired.release("owner3"))
        lock.release("owner2")
        lock.release("owner1")
        return d

    def _cleanup(self, res, lock, count):
        # claim every slot; once we hold them all, any previous owners
        # have been flushed out
        pending = [claimHarder(lock, "cleanup%d" % i) for i in range(count)]
        d2 = defer.DeferredList(pending)
        def _release(res):
            for i in range(count):
                lock.release("cleanup%d" % i)
        d2.addCallback(_release)
        return d2

    def testRandom(self):
        COUNT = 5
        lock = locks.BaseLock("name", COUNT)
        pending = []
        for i in range(100):
            owner = "owner%d" % i
            mode = random.choice(["now", "very soon", "soon"])
            d = claimHarder(lock, owner)
            def _check(acquired):
                # the lock must never be over-subscribed
                self.failIf(len(acquired.owners) > COUNT)
                return acquired
            d.addCallback(_check)
            d.addCallback(hold, owner, mode)
            pending.append(d)
        d = defer.DeferredList(pending)
        d.addCallback(self._cleanup, lock, COUNT)
        return d
class Dummy:
    """Minimal attribute bag, used to fake SlaveBuilder/BuildSlave objects."""
def slave(slavename):
    """Return a fake SlaveBuilder whose .slave.slavename is 'slavename',
    which is all that Lock.getLock() needs to look at."""
    builder = Dummy()
    builder.slave = Dummy()
    builder.slave.slavename = slavename
    return builder
class MakeRealLock(unittest.TestCase):
    """Check lock-id equality/hashability and the lock-id -> real-Lock
    conversion done via lockid.lockClass."""

    def make(self, lockid):
        # each lock id carries the Real*Lock class that implements it
        return lockid.lockClass(lockid)

    def testMaster(self):
        mid1 = locks.MasterLock("name1")
        mid2 = locks.MasterLock("name1")
        mid3 = locks.MasterLock("name3")
        mid4 = locks.MasterLock("name1", 3)
        self.failUnlessEqual(mid1, mid2)
        self.failIfEqual(mid1, mid3)
        # they should all be hashable
        d = {mid1: 1, mid2: 2, mid3: 3, mid4: 4}

        lock1 = self.make(mid1)
        self.failUnlessEqual(lock1.name, "name1")
        self.failUnlessEqual(lock1.maxCount, 1)
        # a master-scoped lock is shared by every slave
        self.failUnlessIdentical(lock1.getLock(slave("slave1")), lock1)
        lock4 = self.make(mid4)
        self.failUnlessEqual(lock4.name, "name1")
        self.failUnlessEqual(lock4.maxCount, 3)
        self.failUnlessIdentical(lock4.getLock(slave("slave1")), lock4)

    def testSlave(self):
        sid1 = locks.SlaveLock("name1")
        sid2 = locks.SlaveLock("name1")
        sid3 = locks.SlaveLock("name3")
        sid4 = locks.SlaveLock("name1", maxCount=3)
        mcfs = {"bigslave": 4, "smallslave": 1}
        sid5 = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs)
        mcfs2 = {"bigslave": 4, "smallslave": 1}
        sid5a = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs2)
        mcfs3 = {"bigslave": 1, "smallslave": 99}
        sid5b = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs3)
        self.failUnlessEqual(sid1, sid2)
        self.failIfEqual(sid1, sid3)
        self.failIfEqual(sid1, sid4)
        self.failIfEqual(sid1, sid5)
        self.failUnlessEqual(sid5, sid5a)
        self.failIfEqual(sid5a, sid5b)
        # they should all be hashable
        d = {sid1: 1, sid2: 2, sid3: 3, sid4: 4, sid5: 5, sid5a: 6, sid5b: 7}

        lock1 = self.make(sid1)
        self.failUnlessEqual(lock1.name, "name1")
        self.failUnlessEqual(lock1.maxCount, 1)
        # a slave-scoped lock hands out a distinct per-slave sublock
        lock1s1 = lock1.getLock(slave("slave1"))
        self.failIfIdentical(lock1s1, lock1)

        lock4 = self.make(sid4)
        self.failUnlessEqual(lock4.maxCount, 3)
        lock4s1 = lock4.getLock(slave("slave1"))
        self.failUnlessEqual(lock4s1.maxCount, 3)

        lock5 = self.make(sid5)
        lock5s1 = lock5.getLock(slave("bigslave"))
        lock5s2 = lock5.getLock(slave("smallslave"))
        lock5s3 = lock5.getLock(slave("unnamedslave"))
        # per-slave overrides apply; unnamed slaves fall back to maxCount
        self.failUnlessEqual(lock5s1.maxCount, 4)
        self.failUnlessEqual(lock5s2.maxCount, 1)
        self.failUnlessEqual(lock5s3.maxCount, 3)
class GetLock(unittest.TestCase):
    """Make sure BotMaster.getLockByID maps equal lock ids to the same
    real Lock instance, and distinct ids to distinct instances."""

    def testGet(self):
        # the master.cfg file contains "lock ids", which are instances of
        # MasterLock and SlaveLock but which are not actually Locks per se.
        # When the build starts, these markers are turned into RealMasterLock
        # and RealSlaveLock instances. This insures that any builds running
        # on slaves that were unaffected by the config change are still
        # referring to the same Lock instance as new builds by builders that
        # *were* affected by the change. There have been bugs in the past in
        # which this didn't happen, and the Locks were bypassed because half
        # the builders were using one incarnation of the lock while the other
        # half were using a separate (but equal) incarnation.

        # Changing the lock id in any way should cause it to be replaced in
        # the BotMaster. This will result in a couple of funky artifacts:
        # builds in progress might pay attention to a different lock, so we
        # might bypass the locking for the duration of a couple builds.
        # There's also the problem of old Locks lingering around in
        # BotMaster.locks, but they're small and shouldn't really cause a
        # problem.

        botmaster = master.BotMaster()
        m1 = locks.MasterLock("one")
        m1a = locks.MasterLock("one")
        m2 = locks.MasterLock("one", maxCount=4)

        real1 = botmaster.getLockByID(m1)
        real2 = botmaster.getLockByID(m1a)
        self.failUnlessIdentical(real1, real2)
        real3 = botmaster.getLockByID(m2)
        self.failIfIdentical(real1, real3)

        s1 = locks.SlaveLock("one")
        s1a = locks.SlaveLock("one")
        s2 = locks.SlaveLock("one", maxCount=4)
        s3 = locks.SlaveLock("one", maxCount=4,
                             maxCountForSlave={"a": 1, "b": 2})
        s3a = locks.SlaveLock("one", maxCount=4,
                              maxCountForSlave={"a": 1, "b": 2})
        s4 = locks.SlaveLock("one", maxCount=4,
                             maxCountForSlave={"a": 4, "b": 4})

        real1 = botmaster.getLockByID(s1)
        real2 = botmaster.getLockByID(s1a)
        self.failUnlessIdentical(real1, real2)
        real3 = botmaster.getLockByID(s2)
        self.failIfIdentical(real1, real3)
        real4 = botmaster.getLockByID(s3)
        self.failIfIdentical(real1, real4)
        self.failIfIdentical(real3, real4)
        real5 = botmaster.getLockByID(s3a)
        self.failUnlessIdentical(real4, real5)
        real6 = botmaster.getLockByID(s4)
        self.failIfIdentical(real5, real6)
class LockStep(dummy.Dummy):
    """A Dummy step that records ('start', n) / ('done', n) tuples on its
    build request's .events list, so tests can verify execution order."""

    def start(self):
        req = self.build.requests[0]
        req.events.append(("start", req.number))
        dummy.Dummy.start(self)

    def done(self):
        req = self.build.requests[0]
        req.events.append(("done", req.number))
        dummy.Dummy.done(self)
# Master configuration used by the Locks tests below.  This is source text
# handed to master.loadConfig(), not executed here.  NOTE: the triple-quote
# delimiters were lost in transit (the embedded line numbering skips the
# lines that held them); they are restored here so the module parses.
config_1 = """
from buildbot import locks
from buildbot.process import factory
from buildbot.buildslave import BuildSlave
s = factory.s
from buildbot.test.test_locks import LockStep

BuildmasterConfig = c = {}
c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit')]
c['schedulers'] = []
c['slavePortnum'] = 0

first_lock = locks.SlaveLock('first')
second_lock = locks.MasterLock('second')
f1 = factory.BuildFactory([s(LockStep, timeout=2, locks=[first_lock])])
f2 = factory.BuildFactory([s(LockStep, timeout=3, locks=[second_lock])])
f3 = factory.BuildFactory([s(LockStep, timeout=2, locks=[])])

b1a = {'name': 'full1a', 'slavename': 'bot1', 'builddir': '1a', 'factory': f1}
b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1b', 'factory': f1}
b1c = {'name': 'full1c', 'slavename': 'bot1', 'builddir': '1c', 'factory': f3,
       'locks': [first_lock, second_lock]}
b1d = {'name': 'full1d', 'slavename': 'bot1', 'builddir': '1d', 'factory': f2}
b2a = {'name': 'full2a', 'slavename': 'bot2', 'builddir': '2a', 'factory': f1}
b2b = {'name': 'full2b', 'slavename': 'bot2', 'builddir': '2b', 'factory': f3,
       'locks': [second_lock]}
c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
"""

# Same config, but full1b's builddir changes ('1b' -> '1B'), forcing that
# builder to be re-created on reload (this tickles the stale-Lock bug that
# testLock1a exercises).
config_1a = config_1 + \
"""
b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1B', 'factory': f1}
c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
"""
class Locks(RunMixin, unittest.TestCase):
    """End-to-end lock tests: run LockStep builds on a real (in-process)
    buildmaster and check the ('start'/'done', request-number) event
    ordering recorded by LockStep.

    Fix: _testLock3_1's failUnless() call was missing its closing
    parenthesis (the line holding it was lost in transit); restored.
    """

    def setUp(self):
        RunMixin.setUp(self)
        self.req1 = req1 = BuildRequest("forced build", SourceStamp())
        req1.number = 1
        self.req2 = req2 = BuildRequest("forced build", SourceStamp())
        req2.number = 2
        self.req3 = req3 = BuildRequest("forced build", SourceStamp())
        req3.number = 3
        # all requests share one event list, so ordering is global
        req1.events = req2.events = req3.events = self.events = []
        d = self.master.loadConfig(config_1)
        d.addCallback(lambda res: self.master.startService())
        d.addCallback(lambda res: self.connectSlaves(["bot1", "bot2"],
                                                     ["full1a", "full1b",
                                                      "full1c", "full1d",
                                                      "full2a", "full2b"]))
        return d

    def testLock1(self):
        # two builders on the same slave, sharing a SlaveLock
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full1b").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock1_1)
        return d

    def _testLock1_1(self, res):
        # full1a should complete its step before full1b starts it
        self.failUnlessEqual(self.events,
                             [("start", 1), ("done", 1),
                              ("start", 2), ("done", 2)])

    def testLock1a(self):
        # just like testLock1, but we reload the config file first, with a
        # change that causes full1b to be changed. This tickles a design bug
        # in which full1a and full1b wind up with distinct Lock instances.
        d = self.master.loadConfig(config_1a)
        d.addCallback(self._testLock1a_1)
        return d

    def _testLock1a_1(self, res):
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full1b").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock1a_2)
        return d

    def _testLock1a_2(self, res):
        # full1a should complete its step before full1b starts it
        self.failUnlessEqual(self.events,
                             [("start", 1), ("done", 1),
                              ("start", 2), ("done", 2)])

    def testLock2(self):
        # two builds run on separate slaves with slave-scoped locks should
        # not interfere
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full2a").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock2_1)
        return d

    def _testLock2_1(self, res):
        # full2a should start its step before full1a finishes it. They run on
        # different slaves, however, so they might start in either order.
        self.failUnless(self.events[:2] == [("start", 1), ("start", 2)] or
                        self.events[:2] == [("start", 2), ("start", 1)])

    def testLock3(self):
        # two builds run on separate slaves with master-scoped locks should
        # not overlap
        self.control.getBuilder("full1c").requestBuild(self.req1)
        self.control.getBuilder("full2b").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock3_1)
        return d

    def _testLock3_1(self, res):
        # full2b should not start until after full1c finishes. The builds run
        # on different slaves, so we can't really predict which will start
        # first. The important thing is that they don't overlap.
        self.failUnless(self.events == [("start", 1), ("done", 1),
                                        ("start", 2), ("done", 2)]
                        or self.events == [("start", 2), ("done", 2),
                                           ("start", 1), ("done", 1)])

    def testLock4(self):
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full1c").requestBuild(self.req2)
        self.control.getBuilder("full1d").requestBuild(self.req3)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished(),
                                self.req3.waitUntilFinished()])
        d.addCallback(self._testLock4_1)
        return d

    def _testLock4_1(self, res):
        # full1a starts, then full1d starts (because they do not interfere).
        # Once both are done, full1c can run.
        self.failUnlessEqual(self.events,
                             [("start", 1), ("start", 3),
                              ("done", 1), ("done", 3),
                              ("start", 2), ("done", 2)])