buildbot/slave/commands.py
1 # -*- test-case-name: buildbot.test.test_slavecommand -*-
3 import os, sys, re, signal, shutil, types, time
4 from stat import ST_CTIME, ST_MTIME, ST_SIZE
6 from zope.interface import implements
7 from twisted.internet.protocol import ProcessProtocol
8 from twisted.internet import reactor, defer, task
9 from twisted.python import log, failure, runtime
10 from twisted.python.procutils import which
12 from buildbot.slave.interfaces import ISlaveCommand
13 from buildbot.slave.registry import registerSlaveCommand
15 # this used to be a CVS $-style "Revision" auto-updated keyword, but since I
16 # moved to Darcs as the primary repository, this is updated manually each
17 # time this file is changed. The last cvs_ver that was here was 1.51 .
18 command_version = "2.8"
20 # version history:
21 # >=1.17: commands are interruptable
22 # >=1.28: Arch understands 'revision', added Bazaar
23 # >=1.33: Source classes understand 'retry'
24 # >=1.39: Source classes correctly handle changes in branch (except Git)
25 # Darcs accepts 'revision' (now all do but Git) (well, and P4Sync)
26 # Arch/Baz should accept 'build-config'
27 # >=1.51: (release 0.7.3)
28 # >= 2.1: SlaveShellCommand now accepts 'initial_stdin', 'keep_stdin_open',
29 # and 'logfiles'. It now sends 'log' messages in addition to
30 # stdout/stdin/header/rc. It acquired writeStdin/closeStdin methods,
31 # but these are not remotely callable yet.
32 # (not externally visible: ShellCommandPP has writeStdin/closeStdin.
33 # ShellCommand accepts new arguments (logfiles=, initialStdin=,
34 # keepStdinOpen=) and no longer accepts stdin=)
35 # (release 0.7.4)
36 # >= 2.2: added monotone, uploadFile, and downloadFile (release 0.7.5)
37 # >= 2.3: added bzr (release 0.7.6)
38 # >= 2.4: Git understands 'revision' and branches
39 # >= 2.5: workaround added for remote 'hg clone --rev REV' when hg<0.9.2
40 # >= 2.6: added uploadDirectory
41 # >= 2.7: added usePTY option to SlaveShellCommand
42 # >= 2.8: added username and password args to SVN class
44 class CommandInterrupted(Exception):
45 pass
46 class TimeoutError(Exception):
47 pass
49 class Obfuscated:
50 """An obfuscated string in a command"""
51 def __init__(self, real, fake):
52 self.real = real
53 self.fake = fake
55 def __str__(self):
56 return self.fake
58 def __repr__(self):
59 return repr(self.fake)
61 def get_real(command):
62 rv = command
63 if type(command) == types.ListType:
64 rv = []
65 for elt in command:
66 if isinstance(elt, Obfuscated):
67 rv.append(elt.real)
68 else:
69 rv.append(elt)
70 return rv
71 get_real = staticmethod(get_real)
73 def get_fake(command):
74 rv = command
75 if type(command) == types.ListType:
76 rv = []
77 for elt in command:
78 if isinstance(elt, Obfuscated):
79 rv.append(elt.fake)
80 else:
81 rv.append(elt)
82 return rv
83 get_fake = staticmethod(get_fake)
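# Illustrative sketch (not part of the original file): how Obfuscated is meant
# to be used when a command line contains a secret.  The command below is
# hypothetical; the SVN class later in this module wraps its --password
# argument in exactly this way.
def _example_obfuscated_command():
    command = ["svn", "checkout", "--username", "bob",
               "--password", Obfuscated("s3cret", "XXXX"),
               "http://svn.example.org/repo/trunk"]
    real = Obfuscated.get_real(command)  # what actually gets executed
    fake = Obfuscated.get_fake(command)  # what is logged / sent to the master
    assert real[5] == "s3cret" and fake[5] == "XXXX"
    return real, fake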
85 class AbandonChain(Exception):
86 """A series of chained steps can raise this exception to indicate that
87 one of the intermediate ShellCommands has failed, such that there is no
88 point in running the remainder. 'rc' should be the non-zero exit code of
89 the failing ShellCommand."""
91 def __repr__(self):
92 return "<AbandonChain rc=%s>" % self.args[0]
94 def getCommand(name):
95 possibles = which(name)
96 if not possibles:
97 raise RuntimeError("Couldn't find executable for '%s'" % name)
98 return possibles[0]
100 def rmdirRecursive(dir):
101 """This is a replacement for shutil.rmtree that works better under
102 windows. Thanks to Bear at the OSAF for the code."""
103 if not os.path.exists(dir):
104 return
106 if os.path.islink(dir):
107 os.remove(dir)
108 return
110 # Verify the directory is read/write/execute for the current user
111 os.chmod(dir, 0700)
113 for name in os.listdir(dir):
114 full_name = os.path.join(dir, name)
115 # on Windows, if we don't have write permission we can't remove
116 # the file/directory either, so turn that on
117 if os.name == 'nt':
118 if not os.access(full_name, os.W_OK):
119 # I think this is now redundant, but I don't have an NT
120 # machine to test on, so I'm going to leave it in place
121 # -warner
122 os.chmod(full_name, 0600)
124 if os.path.isdir(full_name):
125 rmdirRecursive(full_name)
126 else:
127 os.chmod(full_name, 0700)
128 os.remove(full_name)
129 os.rmdir(dir)
131 class ShellCommandPP(ProcessProtocol):
132 debug = False
134 def __init__(self, command):
135 self.command = command
136 self.pending_stdin = ""
137 self.stdin_finished = False
139 def writeStdin(self, data):
140 assert not self.stdin_finished
141 if self.connected:
142 self.transport.write(data)
143 else:
144 self.pending_stdin += data
146 def closeStdin(self):
147 if self.connected:
148 if self.debug: log.msg(" closing stdin")
149 self.transport.closeStdin()
150 self.stdin_finished = True
152 def connectionMade(self):
153 if self.debug:
154 log.msg("ShellCommandPP.connectionMade")
155 if not self.command.process:
156 if self.debug:
157 log.msg(" assigning self.command.process: %s" %
158 (self.transport,))
159 self.command.process = self.transport
161 # TODO: maybe we shouldn't close stdin when using a PTY. I can't test
162 # this yet, recent debian glibc has a bug which causes thread-using
163 # test cases to SIGHUP trial, and the workaround is to either run
164 # the whole test with /bin/sh -c " ".join(argv) (way gross) or to
165 # not use a PTY. Once the bug is fixed, I'll be able to test what
166 # happens when you close stdin on a pty. My concern is that it will
167 # SIGHUP the child (since we are, in a sense, hanging up on them).
168 # But it may well be that keeping stdout open prevents the SIGHUP
169 # from being sent.
170 #if not self.command.usePTY:
172 if self.pending_stdin:
173 if self.debug: log.msg(" writing to stdin")
174 self.transport.write(self.pending_stdin)
175 if self.stdin_finished:
176 if self.debug: log.msg(" closing stdin")
177 self.transport.closeStdin()
179 def outReceived(self, data):
180 if self.debug:
181 log.msg("ShellCommandPP.outReceived")
182 self.command.addStdout(data)
184 def errReceived(self, data):
185 if self.debug:
186 log.msg("ShellCommandPP.errReceived")
187 self.command.addStderr(data)
189 def processEnded(self, status_object):
190 if self.debug:
191 log.msg("ShellCommandPP.processEnded", status_object)
192 # status_object is a Failure wrapped around an
193 # error.ProcessTerminated or an error.ProcessDone.
194 # requires twisted >= 1.0.4 to overcome a bug in process.py
195 sig = status_object.value.signal
196 rc = status_object.value.exitCode
197 self.command.finished(sig, rc)
199 class LogFileWatcher:
200 POLL_INTERVAL = 2
202 def __init__(self, command, name, logfile):
203 self.command = command
204 self.name = name
205 self.logfile = logfile
206 log.msg("LogFileWatcher created to watch %s" % logfile)
207 # we are created before the ShellCommand starts. If the logfile we're
208 # supposed to be watching already exists, record its size and
209 # ctime/mtime so we can tell when it starts to change.
210 self.old_logfile_stats = self.statFile()
211 self.started = False
213 # every 2 seconds we check on the file again
214 self.poller = task.LoopingCall(self.poll)
216 def start(self):
217 self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)
219 def _cleanupPoll(self, err):
220 log.err(err, msg="Polling error")
221 self.poller = None
223 def stop(self):
224 self.poll()
225 if self.poller is not None:
226 self.poller.stop()
227 if self.started:
228 self.f.close()
230 def statFile(self):
231 if os.path.exists(self.logfile):
232 s = os.stat(self.logfile)
233 return (s[ST_CTIME], s[ST_MTIME], s[ST_SIZE])
234 return None
236 def poll(self):
237 if not self.started:
238 s = self.statFile()
239 if s == self.old_logfile_stats:
240 return # not started yet
241 if not s:
242 # the file was there, but now it's deleted. Forget about the
243 # initial state, clearly the process has deleted the logfile
244 # in preparation for creating a new one.
245 self.old_logfile_stats = None
246 return # no file to work with
247 self.f = open(self.logfile, "rb")
248 self.started = True
249 self.f.seek(self.f.tell(), 0)
250 while True:
251 data = self.f.read(10000)
252 if not data:
253 return
254 self.command.addLogfile(self.name, data)
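# Illustrative sketch (not part of the original file): ShellCommand (below)
# creates one LogFileWatcher per entry in its logfiles= dict, giving the
# master 'tail -f'-style updates for workdir-relative files.  The builder
# object and paths here are hypothetical stand-ins.
def _example_watch_logfile(builder):
    cmd = ShellCommand(builder, ["make", "check"], "build",
                       logfiles={"testlog": "_trial_temp/test.log"})
    # while the child runs, data appended to build/_trial_temp/test.log is
    # polled every POLL_INTERVAL seconds and forwarded to the master as
    # {'log': ('testlog', data)} status updates
    return cmd.start()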
257 class ShellCommand:
258 # This is a helper class, used by SlaveCommands to run programs in a
259 # child shell.
261 notreally = False
262 BACKUP_TIMEOUT = 5
263 KILL = "KILL"
264 CHUNK_LIMIT = 128*1024
266 # For sending elapsed time:
267 startTime = None
268 elapsedTime = None
269 # I wish we had easy access to CLOCK_MONOTONIC in Python:
270 # http://www.opengroup.org/onlinepubs/000095399/functions/clock_getres.html
271 # Then changes to the system clock during a run wouldn't affect the "elapsed
272 # time" results.
274 def __init__(self, builder, command,
275 workdir, environ=None,
276 sendStdout=True, sendStderr=True, sendRC=True,
277 timeout=None, initialStdin=None, keepStdinOpen=False,
278 keepStdout=False, keepStderr=False, logEnviron=True,
279 logfiles={}, usePTY="slave-config"):
282 """@param keepStdout: if True, we keep a copy of all the stdout text
283 that we've seen. This copy is available in
284 self.stdout, which can be read after the command
285 has finished.
286 @param keepStderr: same, for stderr
288 @param usePTY: "slave-config" -> use the SlaveBuilder's usePTY;
289 otherwise, true to use a PTY, false to not use a PTY.
"""
292 self.builder = builder
293 self.command = Obfuscated.get_real(command)
294 self.fake_command = Obfuscated.get_fake(command)
295 self.sendStdout = sendStdout
296 self.sendStderr = sendStderr
297 self.sendRC = sendRC
298 self.logfiles = logfiles
299 self.workdir = workdir
300 self.environ = os.environ.copy()
301 if environ:
302 if environ.has_key('PYTHONPATH'):
303 ppath = environ['PYTHONPATH']
304 # Need to do os.pathsep translation. We could either do that
305 # by replacing all incoming ':'s with os.pathsep, or by
306 # accepting lists. I like lists better.
307 if not isinstance(ppath, str):
308 # If it's not a string, treat it as a sequence to be
309 # turned in to a string.
310 ppath = os.pathsep.join(ppath)
312 if self.environ.has_key('PYTHONPATH'):
313 # special case, prepend the builder's items to the
314 # existing ones. This will break if you send over empty
315 # strings, so don't do that.
316 ppath = ppath + os.pathsep + self.environ['PYTHONPATH']
318 environ['PYTHONPATH'] = ppath
320 self.environ.update(environ)
321 self.initialStdin = initialStdin
322 self.keepStdinOpen = keepStdinOpen
323 self.logEnviron = logEnviron
324 self.timeout = timeout
325 self.timer = None
326 self.keepStdout = keepStdout
327 self.keepStderr = keepStderr
330 if usePTY == "slave-config":
331 self.usePTY = self.builder.usePTY
332 else:
333 self.usePTY = usePTY
335 # usePTY=True is a convenience for cleaning up all children and
336 # grandchildren of a hung command. Fall back to usePTY=False on systems
337 # and in situations where ptys cause problems. PTYs are posix-only,
338 # and for .closeStdin to matter, we must use a pipe, not a PTY
339 if runtime.platformType != "posix" or initialStdin is not None:
340 if self.usePTY and usePTY != "slave-config":
341 self.sendStatus({'header': "WARNING: disabling usePTY for this command"})
342 self.usePTY = False
344 self.logFileWatchers = []
345 for name,filename in self.logfiles.items():
346 w = LogFileWatcher(self, name,
347 os.path.join(self.workdir, filename))
348 self.logFileWatchers.append(w)
350 def __repr__(self):
351 return "<slavecommand.ShellCommand '%s'>" % self.fake_command
353 def sendStatus(self, status):
354 self.builder.sendUpdate(status)
356 def start(self):
357 # return a Deferred which fires (with the exit code) when the command
358 # completes
359 if self.keepStdout:
360 self.stdout = ""
361 if self.keepStderr:
362 self.stderr = ""
363 self.deferred = defer.Deferred()
364 try:
365 self._startCommand()
366 except:
367 log.msg("error in ShellCommand._startCommand")
368 log.err()
369 # pretend it was a shell error
370 self.deferred.errback(AbandonChain(-1))
371 return self.deferred
373 def _startCommand(self):
374 # ensure workdir exists
375 if not os.path.isdir(self.workdir):
376 os.makedirs(self.workdir)
377 log.msg("ShellCommand._startCommand")
378 if self.notreally:
379 self.sendStatus({'header': "command '%s' in dir %s" % \
380 (self.fake_command, self.workdir)})
381 self.sendStatus({'header': "(not really)\n"})
382 self.finished(None, 0)
383 return
385 self.pp = ShellCommandPP(self)
387 if type(self.command) in types.StringTypes:
388 if runtime.platformType == 'win32':
389 argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
390 if '/c' not in argv: argv += ['/c']
391 argv += [self.command]
392 else:
393 # for posix, use /bin/sh. for other non-posix, well, doesn't
394 # hurt to try
395 argv = ['/bin/sh', '-c', self.command]
396 display = self.fake_command
397 else:
398 if runtime.platformType == 'win32':
399 argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
400 if '/c' not in argv: argv += ['/c']
401 argv += list(self.command)
402 else:
403 argv = self.command
404 display = " ".join(self.fake_command)
406 # $PWD usually indicates the current directory; spawnProcess may not
407 # update this value, though, so we set it explicitly here.
408 self.environ['PWD'] = os.path.abspath(self.workdir)
410 # self.stdin is handled in ShellCommandPP.connectionMade
412 # first header line is the command in plain text, argv joined with
413 # spaces. You should be able to cut-and-paste this into a shell to
414 # obtain the same results. If there are spaces in the arguments, too
415 # bad.
416 log.msg(" " + display)
417 self.sendStatus({'header': display+"\n"})
419 # then comes the secondary information
420 msg = " in dir %s" % (self.workdir,)
421 if self.timeout:
422 msg += " (timeout %d secs)" % (self.timeout,)
423 log.msg(" " + msg)
424 self.sendStatus({'header': msg+"\n"})
426 msg = " watching logfiles %s" % (self.logfiles,)
427 log.msg(" " + msg)
428 self.sendStatus({'header': msg+"\n"})
430 # then the obfuscated command array, to resolve any ambiguity
431 msg = " argv: %s" % (self.fake_command,)
432 log.msg(" " + msg)
433 self.sendStatus({'header': msg+"\n"})
435 # then the environment, since it sometimes causes problems
436 if self.logEnviron:
437 msg = " environment:\n"
438 env_names = self.environ.keys()
439 env_names.sort()
440 for name in env_names:
441 msg += " %s=%s\n" % (name, self.environ[name])
442 log.msg(" environment: %s" % (self.environ,))
443 self.sendStatus({'header': msg})
445 if self.initialStdin:
446 msg = " writing %d bytes to stdin" % len(self.initialStdin)
447 log.msg(" " + msg)
448 self.sendStatus({'header': msg+"\n"})
450 if self.keepStdinOpen:
451 msg = " leaving stdin open"
452 else:
453 msg = " closing stdin"
454 log.msg(" " + msg)
455 self.sendStatus({'header': msg+"\n"})
457 msg = " using PTY: %s" % bool(self.usePTY)
458 log.msg(" " + msg)
459 self.sendStatus({'header': msg+"\n"})
461 # this will be buffered until connectionMade is called
462 if self.initialStdin:
463 self.pp.writeStdin(self.initialStdin)
464 if not self.keepStdinOpen:
465 self.pp.closeStdin()
467 # win32eventreactor's spawnProcess (under twisted <= 2.0.1) returns
468 # None, as opposed to all the posixbase-derived reactors (which
469 # return the new Process object). This is a nuisance. We can make up
470 # for it by having the ProcessProtocol give us their .transport
471 # attribute after they get one. I'd prefer to get it from
472 # spawnProcess because I'm concerned about returning from this method
473 # without having a valid self.process to work with. (if kill() were
474 # called right after we return, but somehow before connectionMade
475 # were called, then kill() would blow up).
476 self.process = None
477 self.startTime = time.time()
478 p = reactor.spawnProcess(self.pp, argv[0], argv,
479 self.environ,
480 self.workdir,
481 usePTY=self.usePTY)
482 # connectionMade might have been called during spawnProcess
483 if not self.process:
484 self.process = p
486 # connectionMade also closes stdin as long as we're not using a PTY.
487 # This is intended to kill off inappropriately interactive commands
488 # better than the (long) hung-command timeout. ProcessPTY should be
489 # enhanced to allow the same childFDs argument that Process takes,
490 # which would let us connect stdin to /dev/null .
492 if self.timeout:
493 self.timer = reactor.callLater(self.timeout, self.doTimeout)
495 for w in self.logFileWatchers:
496 w.start()
499 def _chunkForSend(self, data):
500 # limit the chunks that we send over PB to 128k, since it has a
501 # hardwired string-size limit of 640k.
502 LIMIT = self.CHUNK_LIMIT
503 for i in range(0, len(data), LIMIT):
504 yield data[i:i+LIMIT]
506 def addStdout(self, data):
507 if self.sendStdout:
508 for chunk in self._chunkForSend(data):
509 self.sendStatus({'stdout': chunk})
510 if self.keepStdout:
511 self.stdout += data
512 if self.timer:
513 self.timer.reset(self.timeout)
515 def addStderr(self, data):
516 if self.sendStderr:
517 for chunk in self._chunkForSend(data):
518 self.sendStatus({'stderr': chunk})
519 if self.keepStderr:
520 self.stderr += data
521 if self.timer:
522 self.timer.reset(self.timeout)
524 def addLogfile(self, name, data):
525 for chunk in self._chunkForSend(data):
526 self.sendStatus({'log': (name, chunk)})
527 if self.timer:
528 self.timer.reset(self.timeout)
530 def finished(self, sig, rc):
531 self.elapsedTime = time.time() - self.startTime
532 log.msg("command finished with signal %s, exit code %s, elapsedTime: %0.6f" % (sig,rc,self.elapsedTime))
533 for w in self.logFileWatchers:
534 # this will send the final updates
535 w.stop()
536 if sig is not None:
537 rc = -1
538 if self.sendRC:
539 if sig is not None:
540 self.sendStatus(
541 {'header': "process killed by signal %d\n" % sig})
542 self.sendStatus({'rc': rc})
543 self.sendStatus({'header': "elapsedTime=%0.6f\n" % self.elapsedTime})
544 if self.timer:
545 self.timer.cancel()
546 self.timer = None
547 d = self.deferred
548 self.deferred = None
549 if d:
550 d.callback(rc)
551 else:
552 log.msg("Hey, command %s finished twice" % self)
554 def failed(self, why):
555 log.msg("ShellCommand.failed: command failed: %s" % (why,))
556 if self.timer:
557 self.timer.cancel()
558 self.timer = None
559 d = self.deferred
560 self.deferred = None
561 if d:
562 d.errback(why)
563 else:
564 log.msg("Hey, command %s finished twice" % self)
566 def doTimeout(self):
567 self.timer = None
568 msg = "command timed out: %d seconds without output" % self.timeout
569 self.kill(msg)
571 def kill(self, msg):
572 # This may be called by the timeout, or when the user has decided to
573 # abort this build.
574 if self.timer:
575 self.timer.cancel()
576 self.timer = None
577 if hasattr(self.process, "pid"):
578 msg += ", killing pid %d" % self.process.pid
579 log.msg(msg)
580 self.sendStatus({'header': "\n" + msg + "\n"})
582 hit = 0
583 if runtime.platformType == "posix":
584 try:
585 # really want to kill off all child processes too. Process
586 # Groups are ideal for this, but that requires
587 # spawnProcess(usePTY=1). Try both ways in case process was
588 # not started that way.
590 # the test suite sets self.KILL=None to tell us we should
591 # only pretend to kill the child. This lets us test the
592 # backup timer.
594 sig = None
595 if self.KILL is not None:
596 sig = getattr(signal, "SIG"+ self.KILL, None)
598 if self.KILL == None:
599 log.msg("self.KILL==None, only pretending to kill child")
600 elif sig is None:
601 log.msg("signal module is missing SIG%s" % self.KILL)
602 elif not hasattr(os, "kill"):
603 log.msg("os module is missing the 'kill' function")
604 else:
605 log.msg("trying os.kill(-pid, %d)" % (sig,))
606 # TODO: maybe use os.killpg instead of a negative pid?
607 os.kill(-self.process.pid, sig)
608 log.msg(" signal %s sent successfully" % sig)
609 hit = 1
610 except OSError:
611 # probably no-such-process, maybe because there is no process
612 # group
613 pass
614 if not hit:
615 try:
616 if self.KILL is None:
617 log.msg("self.KILL==None, only pretending to kill child")
618 else:
619 log.msg("trying process.signalProcess('KILL')")
620 self.process.signalProcess(self.KILL)
621 log.msg(" signal %s sent successfully" % (self.KILL,))
622 hit = 1
623 except OSError:
624 # could be no-such-process, because they finished very recently
625 pass
626 if not hit:
627 log.msg("signalProcess/os.kill failed both times")
629 if runtime.platformType == "posix":
630 # we only do this under posix because the win32eventreactor
631 # blocks here until the process has terminated, while closing
632 # stderr. This is weird.
633 self.pp.transport.loseConnection()
635 # finished ought to be called momentarily. Just in case it doesn't,
636 # set a timer which will abandon the command.
637 self.timer = reactor.callLater(self.BACKUP_TIMEOUT,
638 self.doBackupTimeout)
640 def doBackupTimeout(self):
641 log.msg("we tried to kill the process, and it wouldn't die.."
642 " finish anyway")
643 self.timer = None
644 self.sendStatus({'header': "SIGKILL failed to kill process\n"})
645 if self.sendRC:
646 self.sendStatus({'header': "using fake rc=-1\n"})
647 self.sendStatus({'rc': -1})
648 self.failed(TimeoutError("SIGKILL failed to kill process"))
651 def writeStdin(self, data):
652 self.pp.writeStdin(data)
654 def closeStdin(self):
655 self.pp.closeStdin()
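# Illustrative sketch (not part of the original file): the way a SlaveCommand
# typically drives ShellCommand.  'builder' is the hypothetical parent
# SlaveBuilder; the PYTHONPATH value is passed as a list so __init__ can do
# the os.pathsep translation described above.
def _example_run_shell_command(builder):
    cmd = ShellCommand(builder, ["python", "setup.py", "test"], "build",
                       environ={"PYTHONPATH": ["lib", "src"]},
                       timeout=1200, sendRC=True, usePTY="slave-config")
    d = cmd.start()  # Deferred that fires with the exit code
    d.addCallback(lambda rc: log.msg("example command finished, rc=%s" % rc))
    return d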
658 class Command:
659 implements(ISlaveCommand)
661 """This class defines one command that can be invoked by the build master.
662 The command is executed on the slave side, and always sends back a
663 completion message when it finishes. It may also send intermediate status
664 as it runs (by calling builder.sendStatus). Some commands can be
665 interrupted (either by the build master or a local timeout), in which
666 case the step is expected to complete normally with a status message that
667 indicates an error occurred.
669 These commands are used by BuildSteps on the master side. Each kind of
670 BuildStep uses a single Command. The slave must implement all the
671 Commands required by the set of BuildSteps used for any given build:
672 this is checked at startup time.
674 All Commands are constructed with the same signature:
675 c = CommandClass(builder, args)
676 where 'builder' is the parent SlaveBuilder object, and 'args' is a
677 dict that is interpreted per-command.
679 The setup(args) method is available for setup, and is run from __init__.
681 The Command is started with start(). This method must be implemented in a
682 subclass, and it should return a Deferred. When your step is done, you
683 should fire the Deferred (the results are not used). If the command is
684 interrupted, it should fire the Deferred anyway.
686 While the command runs, it may send status messages back to the
687 buildmaster by calling self.sendStatus(statusdict). The statusdict is
688 interpreted by the master-side BuildStep however it likes.
690 A separate completion message is sent when the deferred fires, which
691 indicates that the Command has finished, but does not carry any status
692 data. If the Command needs to return an exit code of some sort, that
693 should be sent as a regular status message before the deferred is fired.
694 Once builder.commandComplete has been run, no more status messages may be
695 sent.
697 If interrupt() is called, the Command should attempt to shut down as
698 quickly as possible. Child processes should be killed, new ones should
699 not be started. The Command should send some kind of error status update,
700 then complete as usual by firing the Deferred.
702 .interrupted should be set by interrupt(), and can be tested to avoid
703 sending multiple error status messages.
705 If .running is False, the bot is shutting down (or has otherwise lost the
706 connection to the master), and should not send any status messages. This
707 is checked in Command.sendStatus.
"""
711 # builder methods:
712 # sendStatus(dict) (zero or more)
713 # commandComplete() or commandInterrupted() (one, at end)
715 debug = False
716 interrupted = False
717 running = False # set by Builder, cleared on shutdown or when the
718 # Deferred fires
720 def __init__(self, builder, stepId, args):
721 self.builder = builder
722 self.stepId = stepId # just for logging
723 self.args = args
724 self.setup(args)
726 def setup(self, args):
727 """Override this in a subclass to extract items from the args dict."""
728 pass
730 def doStart(self):
731 self.running = True
732 d = defer.maybeDeferred(self.start)
733 d.addBoth(self.commandComplete)
734 return d
736 def start(self):
737 """Start the command. This method should return a Deferred that will
738 fire when the command has completed. The Deferred's argument will be
739 ignored.
741 This method should be overridden by subclasses."""
742 raise NotImplementedError("You must implement this in a subclass")
744 def sendStatus(self, status):
745 """Send a status update to the master."""
746 if self.debug:
747 log.msg("sendStatus", status)
748 if not self.running:
749 log.msg("would sendStatus but not .running")
750 return
751 self.builder.sendUpdate(status)
753 def doInterrupt(self):
754 self.running = False
755 self.interrupt()
757 def interrupt(self):
758 """Override this in a subclass to allow commands to be interrupted.
759 May be called multiple times, test and set self.interrupted=True if
760 this matters."""
761 pass
763 def commandComplete(self, res):
764 self.running = False
765 return res
767 # utility methods, mostly used by SlaveShellCommand and the like
769 def _abandonOnFailure(self, rc):
770 if type(rc) is not int:
771 log.msg("weird, _abandonOnFailure was given rc=%s (%s)" % \
772 (rc, type(rc)))
773 assert isinstance(rc, int)
774 if rc != 0:
775 raise AbandonChain(rc)
776 return rc
778 def _sendRC(self, res):
779 self.sendStatus({'rc': 0})
781 def _checkAbandoned(self, why):
782 log.msg("_checkAbandoned", why)
783 why.trap(AbandonChain)
784 log.msg(" abandoning chain", why.value)
785 self.sendStatus({'rc': why.value.args[0]})
786 return None
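# Illustrative sketch (not part of the original file): the minimal shape of a
# Command subclass, following the contract described in the class docstring
# above.  Everything here (the command name, the args key) is hypothetical.
class _ExampleEchoCommand(Command):
    def setup(self, args):
        self.message = args.get('message', 'hello')

    def start(self):
        self.sendStatus({'stdout': self.message + "\n"})
        self.sendStatus({'rc': 0})
        return defer.succeed(None)

    def interrupt(self):
        self.interrupted = True
# a real command would also be registered, e.g.:
# registerSlaveCommand("example-echo", _ExampleEchoCommand, command_version)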
790 class SlaveFileUploadCommand(Command):
792 """Upload a file from slave to build master
793 Arguments:
795 - ['workdir']: base directory to use
796 - ['slavesrc']: name of the slave-side file to read from
797 - ['writer']: RemoteReference to a transfer._FileWriter object
798 - ['maxsize']: max size (in bytes) of file to write
799 - ['blocksize']: max size for each data block
"""
801 debug = False
803 def setup(self, args):
804 self.workdir = args['workdir']
805 self.filename = args['slavesrc']
806 self.writer = args['writer']
807 self.remaining = args['maxsize']
808 self.blocksize = args['blocksize']
809 self.stderr = None
810 self.rc = 0
812 def start(self):
813 if self.debug:
814 log.msg('SlaveFileUploadCommand started')
816 # Open file
817 self.path = os.path.join(self.builder.basedir,
818 self.workdir,
819 os.path.expanduser(self.filename))
820 try:
821 self.fp = open(self.path, 'rb')
822 if self.debug:
823 log.msg('Opened %r for upload' % self.path)
824 except:
825 # TODO: this needs cleanup
826 self.fp = None
827 self.stderr = 'Cannot open file %r for upload' % self.path
828 self.rc = 1
829 if self.debug:
830 log.msg('Cannot open file %r for upload' % self.path)
832 self.sendStatus({'header': "sending %s" % self.path})
834 d = defer.Deferred()
835 reactor.callLater(0, self._loop, d)
836 def _close(res):
837 # close the file, but pass through any errors from _loop
838 d1 = self.writer.callRemote("close")
839 d1.addErrback(log.err)
840 d1.addCallback(lambda ignored: res)
841 return d1
842 d.addBoth(_close)
843 d.addBoth(self.finished)
844 return d
846 def _loop(self, fire_when_done):
847 d = defer.maybeDeferred(self._writeBlock)
848 def _done(finished):
849 if finished:
850 fire_when_done.callback(None)
851 else:
852 self._loop(fire_when_done)
853 def _err(why):
854 fire_when_done.errback(why)
855 d.addCallbacks(_done, _err)
856 return None
858 def _writeBlock(self):
859 """Write a block of data to the remote writer"""
861 if self.interrupted or self.fp is None:
862 if self.debug:
863 log.msg('SlaveFileUploadCommand._writeBlock(): end')
864 return True
866 length = self.blocksize
867 if self.remaining is not None and length > self.remaining:
868 length = self.remaining
870 if length <= 0:
871 if self.stderr is None:
872 self.stderr = 'Maximum filesize reached, truncating file %r' \
873 % self.path
874 self.rc = 1
875 data = ''
876 else:
877 data = self.fp.read(length)
879 if self.debug:
880 log.msg('SlaveFileUploadCommand._writeBlock(): '+
881 'allowed=%d readlen=%d' % (length, len(data)))
882 if len(data) == 0:
883 log.msg("EOF: callRemote(close)")
884 return True
886 if self.remaining is not None:
887 self.remaining = self.remaining - len(data)
888 assert self.remaining >= 0
889 d = self.writer.callRemote('write', data)
890 d.addCallback(lambda res: False)
891 return d
893 def interrupt(self):
894 if self.debug:
895 log.msg('interrupted')
896 if self.interrupted:
897 return
898 if self.stderr is None:
899 self.stderr = 'Upload of %r interrupted' % self.path
900 self.rc = 1
901 self.interrupted = True
902 # the next _writeBlock call will notice the .interrupted flag
904 def finished(self, res):
905 if self.debug:
906 log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
907 if self.stderr is None:
908 self.sendStatus({'rc': self.rc})
909 else:
910 self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
911 return res
913 registerSlaveCommand("uploadFile", SlaveFileUploadCommand, command_version)
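# Illustrative sketch (not part of the original file): the args dict the
# master-side FileUpload step would hand to this command.  'writer' must be
# a RemoteReference to a _FileWriter; here it is just a parameter, and the
# filename and sizes are hypothetical.
def _example_upload_args(writer):
    return {'workdir': "build",
            'slavesrc': "dist/output.tar.gz",
            'writer': writer,
            'maxsize': 10 * 1024 * 1024,   # refuse to send more than 10 MiB
            'blocksize': 16 * 1024}        # 16 KiB per remote 'write' call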
916 class SlaveDirectoryUploadCommand(Command):
918 """Upload a directory from slave to build master
919 Arguments:
921 - ['workdir']: base directory to use
922 - ['slavesrc']: name of the slave-side directory to read from
923 - ['writer']: RemoteReference to a transfer._DirectoryWriter object
924 - ['maxsize']: max size (in bytes) of file to write
925 - ['blocksize']: max size for each data block
"""
927 debug = True
929 def setup(self, args):
930 self.workdir = args['workdir']
931 self.dirname = args['slavesrc']
932 self.writer = args['writer']
933 self.remaining = args['maxsize']
934 self.blocksize = args['blocksize']
935 self.stderr = None
936 self.rc = 0
938 def start(self):
939 if self.debug:
940 log.msg('SlaveDirectoryUploadCommand started')
942 # create some lists with all files and directories
943 foundFiles = []
944 foundDirs = []
946 self.baseRoot = os.path.join(self.builder.basedir,
947 self.workdir,
948 os.path.expanduser(self.dirname))
949 if self.debug:
950 log.msg("baseRoot: %r" % self.baseRoot)
952 for root, dirs, files in os.walk(self.baseRoot):
953 tempRoot = root
954 relRoot = ''
955 while (tempRoot != self.baseRoot):
956 tempRoot, tempRelRoot = os.path.split(tempRoot)
957 relRoot = os.path.join(tempRelRoot, relRoot)
958 for name in files:
959 foundFiles.append(os.path.join(relRoot, name))
960 for directory in dirs:
961 foundDirs.append(os.path.join(relRoot, directory))
963 if self.debug:
964 log.msg("foundDirs: %s" % (str(foundDirs)))
965 log.msg("foundFiles: %s" % (str(foundFiles)))
967 # create all directories on the master, to catch also empty ones
968 for dirname in foundDirs:
969 self.writer.callRemote("createdir", dirname)
971 for filename in foundFiles:
972 self._writeFile(filename)
974 return None
976 def _writeFile(self, filename):
977 """Write a file to the remote writer"""
979 log.msg("_writeFile: %r" % (filename))
980 self.writer.callRemote('open', filename)
981 data = open(os.path.join(self.baseRoot, filename), "rb").read()
982 self.writer.callRemote('write', data)
983 self.writer.callRemote('close')
984 return None
986 def interrupt(self):
987 if self.debug:
988 log.msg('interrupted')
989 if self.interrupted:
990 return
991 if self.stderr is None:
992 self.stderr = 'Upload of %r interrupted' % self.dirname
993 self.rc = 1
994 self.interrupted = True
995 # the next _writeBlock call will notice the .interrupted flag
997 def finished(self, res):
998 if self.debug:
999 log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
1000 if self.stderr is None:
1001 self.sendStatus({'rc': self.rc})
1002 else:
1003 self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
1004 return res
1006 registerSlaveCommand("uploadDirectory", SlaveDirectoryUploadCommand, command_version)
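# Illustrative note (not part of the original file): the os.walk loop in
# SlaveDirectoryUploadCommand.start() rebuilds paths relative to baseRoot by
# splitting tempRoot one component at a time.  A hypothetical standalone
# version of that computation:
def _example_relative_root(baseRoot, root):
    relRoot = ''
    while root != baseRoot:
        root, tail = os.path.split(root)
        relRoot = os.path.join(tail, relRoot)
    return relRoot  # e.g. ('/b/src', '/b/src/pkg/tests') -> 'pkg/tests/'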
1009 class SlaveFileDownloadCommand(Command):
1011 """Download a file from master to slave
1012 Arguments:
1014 - ['workdir']: base directory to use
1015 - ['slavedest']: name of the slave-side file to be created
1016 - ['reader']: RemoteReference to a transfer._FileReader object
1017 - ['maxsize']: max size (in bytes) of file to write
1018 - ['blocksize']: max size for each data block
1019 - ['mode']: access mode for the new file
"""
1021 debug = False
1023 def setup(self, args):
1024 self.workdir = args['workdir']
1025 self.filename = args['slavedest']
1026 self.reader = args['reader']
1027 self.bytes_remaining = args['maxsize']
1028 self.blocksize = args['blocksize']
1029 self.mode = args['mode']
1030 self.stderr = None
1031 self.rc = 0
1033 def start(self):
1034 if self.debug:
1035 log.msg('SlaveFileDownloadCommand starting')
1037 # Open file
1038 self.path = os.path.join(self.builder.basedir,
1039 self.workdir,
1040 os.path.expanduser(self.filename))
1042 dirname = os.path.dirname(self.path)
1043 if not os.path.exists(dirname):
1044 os.makedirs(dirname)
1046 try:
1047 self.fp = open(self.path, 'wb')
1048 if self.debug:
1049 log.msg('Opened %r for download' % self.path)
1050 if self.mode is not None:
1051 # note: there is a brief window during which the new file
1052 # will have the buildslave's default (umask) mode before we
1053 # set the new one. Don't use this mode= feature to keep files
1054 # private: use the buildslave's umask for that instead. (it
1055 # is possible to call os.umask() before and after the open()
1056 # call, but cleaning up from exceptions properly is more of a
1057 # nuisance that way).
1058 os.chmod(self.path, self.mode)
1059 except IOError:
1060 # TODO: this still needs cleanup
1061 self.fp = None
1062 self.stderr = 'Cannot open file %r for download' % self.path
1063 self.rc = 1
1064 if self.debug:
1065 log.msg('Cannot open file %r for download' % self.path)
1067 d = defer.Deferred()
1068 reactor.callLater(0, self._loop, d)
1069 def _close(res):
1070 # close the file, but pass through any errors from _loop
1071 d1 = self.reader.callRemote('close')
1072 d1.addErrback(log.err)
1073 d1.addCallback(lambda ignored: res)
1074 return d1
1075 d.addBoth(_close)
1076 d.addBoth(self.finished)
1077 return d
1079 def _loop(self, fire_when_done):
1080 d = defer.maybeDeferred(self._readBlock)
1081 def _done(finished):
1082 if finished:
1083 fire_when_done.callback(None)
1084 else:
1085 self._loop(fire_when_done)
1086 def _err(why):
1087 fire_when_done.errback(why)
1088 d.addCallbacks(_done, _err)
1089 return None
1091 def _readBlock(self):
1092 """Read a block of data from the remote reader."""
1094 if self.interrupted or self.fp is None:
1095 if self.debug:
1096 log.msg('SlaveFileDownloadCommand._readBlock(): end')
1097 return True
1099 length = self.blocksize
1100 if self.bytes_remaining is not None and length > self.bytes_remaining:
1101 length = self.bytes_remaining
1103 if length <= 0:
1104 if self.stderr is None:
1105 self.stderr = 'Maximum filesize reached, truncating file %r' \
1106 % self.path
1107 self.rc = 1
1108 return True
1109 else:
1110 d = self.reader.callRemote('read', length)
1111 d.addCallback(self._writeData)
1112 return d
1114 def _writeData(self, data):
1115 if self.debug:
1116 log.msg('SlaveFileDownloadCommand._readBlock(): readlen=%d' %
1117 len(data))
1118 if len(data) == 0:
1119 return True
1121 if self.bytes_remaining is not None:
1122 self.bytes_remaining = self.bytes_remaining - len(data)
1123 assert self.bytes_remaining >= 0
1124 self.fp.write(data)
1125 return False
1127 def interrupt(self):
1128 if self.debug:
1129 log.msg('interrupted')
1130 if self.interrupted:
1131 return
1132 if self.stderr is None:
1133 self.stderr = 'Download of %r interrupted' % self.path
1134 self.rc = 1
1135 self.interrupted = True
1136 # now we wait for the next read request to return. _readBlock will
1137 # abandon the file when it sees self.interrupted set.
1139 def finished(self, res):
1140 if self.fp is not None:
1141 self.fp.close()
1143 if self.debug:
1144 log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
1145 if self.stderr is None:
1146 self.sendStatus({'rc': self.rc})
1147 else:
1148 self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
1149 return res
1151 registerSlaveCommand("downloadFile", SlaveFileDownloadCommand, command_version)
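# Illustrative note (not part of the original file): both the upload and the
# download commands stream data with the same small state machine: _loop()
# runs one block at a time and re-arms itself until the block handler reports
# completion.  A generic, hypothetical version of that pattern:
def _example_transfer_loop(read_one_block):
    done = defer.Deferred()
    def _step():
        d = defer.maybeDeferred(read_one_block)  # should return True when finished
        def _next(finished):
            if finished:
                done.callback(None)
            else:
                _step()
        d.addCallbacks(_next, done.errback)
    reactor.callLater(0, _step)
    return done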
1155 class SlaveShellCommand(Command):
1156 """This is a Command which runs a shell command. The args dict contains
1157 the following keys:
1159 - ['command'] (required): a shell command to run. If this is a string,
1160 it will be run with /bin/sh (['/bin/sh',
1161 '-c', command]). If it is a list
1162 (preferred), it will be used directly.
1163 - ['workdir'] (required): subdirectory in which the command will be
1164 run, relative to the builder dir
1165 - ['env']: a dict of environment variables to augment/replace
1166 os.environ . PYTHONPATH is treated specially, and
1167 should be a list of path components to be prepended to
1168 any existing PYTHONPATH environment variable.
1169 - ['initial_stdin']: a string which will be written to the command's
1170 stdin as soon as it starts
1171 - ['keep_stdin_open']: unless True, the command's stdin will be
1172 closed as soon as initial_stdin has been
1173 written. Set this to True if you plan to write
1174 to stdin after the command has been started.
1175 - ['want_stdout']: 0 if stdout should be thrown away
1176 - ['want_stderr']: 0 if stderr should be thrown away
1177 - ['usePTY']: True or False if the command should use a PTY (defaults to
1178 configuration of the slave)
1179 - ['not_really']: 1 to skip execution and return rc=0
1180 - ['timeout']: seconds of silence to tolerate before killing command
1181 - ['logfiles']: dict mapping LogFile name to the workdir-relative
1182 filename of a local log file. This local file will be
1183 watched just like 'tail -f', and all changes will be
1184 written to 'log' status updates.
1186 ShellCommand creates the following status messages:
1187 - {'stdout': data} : when stdout data is available
1188 - {'stderr': data} : when stderr data is available
1189 - {'header': data} : when headers (command start/stop) are available
1190 - {'log': (logfile_name, data)} : when log files have new contents
1191 - {'rc': rc} : when the process has terminated
"""
1194 def start(self):
1195 args = self.args
1196 # args['workdir'] is relative to Builder directory, and is required.
1197 assert args['workdir'] is not None
1198 workdir = os.path.join(self.builder.basedir, args['workdir'])
1200 c = ShellCommand(self.builder, args['command'],
1201 workdir, environ=args.get('env'),
1202 timeout=args.get('timeout', None),
1203 sendStdout=args.get('want_stdout', True),
1204 sendStderr=args.get('want_stderr', True),
1205 sendRC=True,
1206 initialStdin=args.get('initial_stdin'),
1207 keepStdinOpen=args.get('keep_stdin_open'),
1208 logfiles=args.get('logfiles', {}),
1209 usePTY=args.get('usePTY', "slave-config"))
1211 self.command = c
1212 d = self.command.start()
1213 return d
1215 def interrupt(self):
1216 self.interrupted = True
1217 self.command.kill("command interrupted")
1219 def writeStdin(self, data):
1220 self.command.writeStdin(data)
1222 def closeStdin(self):
1223 self.command.closeStdin()
1225 registerSlaveCommand("shell", SlaveShellCommand, command_version)
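# Illustrative sketch (not part of the original file): an args dict of the
# shape the master sends for the "shell" command, using the keys documented
# in SlaveShellCommand above.  The actual command and paths are hypothetical.
_example_shell_args = {
    'command': ["make", "BUILDTYPE=release", "all"],
    'workdir': "build",
    'env': {'PYTHONPATH': ["lib"]},
    'want_stdout': 1,
    'want_stderr': 1,
    'logfiles': {'warnings': "warnings.log"},
    'timeout': 1200,
}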
1228 class DummyCommand(Command):
1230 """I am a dummy no-op command that by default takes 5 seconds to complete.
1231 See L{buildbot.steps.dummy.RemoteDummy}
"""
1234 def start(self):
1235 self.d = defer.Deferred()
1236 log.msg(" starting dummy command [%s]" % self.stepId)
1237 self.timer = reactor.callLater(1, self.doStatus)
1238 return self.d
1240 def interrupt(self):
1241 if self.interrupted:
1242 return
1243 self.timer.cancel()
1244 self.timer = None
1245 self.interrupted = True
1246 self.finished()
1248 def doStatus(self):
1249 log.msg(" sending intermediate status")
1250 self.sendStatus({'stdout': 'data'})
1251 timeout = self.args.get('timeout', 5) + 1
1252 self.timer = reactor.callLater(timeout - 1, self.finished)
1254 def finished(self):
1255 log.msg(" dummy command finished [%s]" % self.stepId)
1256 if self.interrupted:
1257 self.sendStatus({'rc': 1})
1258 else:
1259 self.sendStatus({'rc': 0})
1260 self.d.callback(0)
1262 registerSlaveCommand("dummy", DummyCommand, command_version)
1265 # this maps handle names to a callable. When the WaitCommand starts, this
1266 # callable is invoked with no arguments. It should return a Deferred. When
1267 # that Deferred fires, our WaitCommand will finish.
1268 waitCommandRegistry = {}
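# Illustrative sketch (not part of the original file): how the unit tests are
# expected to use this registry -- register a callable under a handle, then
# send a "dummy.wait" command whose args include that handle.  The handle
# name and the deferred wiring here are hypothetical.
def _example_register_wait(handle="example-handle"):
    finish = defer.Deferred()
    waitCommandRegistry[handle] = lambda: finish
    # the test later calls finish.callback(None) to let the WaitCommand end
    return finish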
1270 class WaitCommand(Command):
1272 """I am a dummy command used by the buildbot unit test suite. I wait for
1273 the unit test to tell me to finish. See L{buildbot.steps.dummy.Wait}
"""
1276 def start(self):
1277 self.d = defer.Deferred()
1278 log.msg(" starting wait command [%s]" % self.stepId)
1279 handle = self.args['handle']
1280 cb = waitCommandRegistry[handle]
1281 del waitCommandRegistry[handle]
1282 def _called():
1283 log.msg(" wait-%s starting" % (handle,))
1284 d = cb()
1285 def _done(res):
1286 log.msg(" wait-%s finishing: %s" % (handle, res))
1287 return res
1288 d.addBoth(_done)
1289 d.addCallbacks(self.finished, self.failed)
1290 reactor.callLater(0, _called)
1291 return self.d
1293 def interrupt(self):
1294 log.msg(" wait command interrupted")
1295 if self.interrupted:
1296 return
1297 self.interrupted = True
1298 self.finished("interrupted")
1300 def finished(self, res):
1301 log.msg(" wait command finished [%s]" % self.stepId)
1302 if self.interrupted:
1303 self.sendStatus({'rc': 2})
1304 else:
1305 self.sendStatus({'rc': 0})
1306 self.d.callback(0)
1307 def failed(self, why):
1308 log.msg(" wait command failed [%s]" % self.stepId)
1309 self.sendStatus({'rc': 1})
1310 self.d.callback(0)
1312 registerSlaveCommand("dummy.wait", WaitCommand, command_version)
1315 class SourceBase(Command):
1316 """Abstract base class for Version Control System operations (checkout
1317 and update). This class extracts the following arguments from the
1318 dictionary received from the master:
1320 - ['workdir']: (required) the subdirectory where the buildable sources
1321 should be placed
1323 - ['mode']: one of update/copy/clobber/export, defaults to 'update'
1325 - ['revision']: If not None, this is an int or string which indicates
1326 which sources (along a time-like axis) should be used.
1327 It is the thing you provide as the CVS -r or -D
1328 argument.
1330 - ['patch']: If not None, this is a tuple of (striplevel, patch)
1331 which contains a patch that should be applied after the
1332 checkout has occurred. Once applied, the tree is no
1333 longer eligible for use with mode='update', and it only
1334 makes sense to use this in conjunction with a
1335 ['revision'] argument. striplevel is an int, and patch
1336 is a string in standard unified diff format. The patch
1337 will be applied with 'patch -p%d <PATCH', with
1338 STRIPLEVEL substituted as %d. The command will fail if
1339 the patch process fails (rejected hunks).
1341 - ['timeout']: seconds of silence tolerated before we kill off the
1342 command
1344 - ['retry']: If not None, this is a tuple of (delay, repeats)
1345 which means that any failed VC updates should be
1346 reattempted, up to REPEATS times, after a delay of
1347 DELAY seconds. This is intended to deal with slaves
1348 that experience transient network failures.
"""
1351 sourcedata = ""
1353 def setup(self, args):
1354 # if we need to parse the output, use this environment. Otherwise
1355 # command output will be in whatever the buildslave's native language
1356 # has been set to.
1357 self.env = os.environ.copy()
1358 self.env['LC_MESSAGES'] = "C"
1360 self.workdir = args['workdir']
1361 self.mode = args.get('mode', "update")
1362 self.revision = args.get('revision')
1363 self.patch = args.get('patch')
1364 self.timeout = args.get('timeout', 120)
1365 self.retry = args.get('retry')
1366 # VC-specific subclasses should override this to extract more args.
1367 # Make sure to upcall!
1369 def start(self):
1370 self.sendStatus({'header': "starting " + self.header + "\n"})
1371 self.command = None
1373 # self.srcdir is where the VC system should put the sources
1374 if self.mode == "copy":
1375 self.srcdir = "source" # hardwired directory name, sorry
1376 else:
1377 self.srcdir = self.workdir
1378 self.sourcedatafile = os.path.join(self.builder.basedir,
1379 self.srcdir,
1380 ".buildbot-sourcedata")
1382 d = defer.succeed(None)
1383 self.maybeClobber(d)
1384 if not (self.sourcedirIsUpdateable() and self.sourcedataMatches()):
1385 # the directory cannot be updated, so we have to clobber it.
1386 # Perhaps the master just changed modes from 'export' to
1387 # 'update'.
1388 d.addCallback(self.doClobber, self.srcdir)
1390 d.addCallback(self.doVC)
1392 if self.mode == "copy":
1393 d.addCallback(self.doCopy)
1394 if self.patch:
1395 d.addCallback(self.doPatch)
1396 d.addCallbacks(self._sendRC, self._checkAbandoned)
1397 return d
1399 def maybeClobber(self, d):
1400 # do we need to clobber anything?
1401 if self.mode in ("copy", "clobber", "export"):
1402 d.addCallback(self.doClobber, self.workdir)
1404 def interrupt(self):
1405 self.interrupted = True
1406 if self.command:
1407 self.command.kill("command interrupted")
1409 def doVC(self, res):
1410 if self.interrupted:
1411 raise AbandonChain(1)
1412 if self.sourcedirIsUpdateable() and self.sourcedataMatches():
1413 d = self.doVCUpdate()
1414 d.addCallback(self.maybeDoVCFallback)
1415 else:
1416 d = self.doVCFull()
1417 d.addBoth(self.maybeDoVCRetry)
1418 d.addCallback(self._abandonOnFailure)
1419 d.addCallback(self._handleGotRevision)
1420 d.addCallback(self.writeSourcedata)
1421 return d
1423 def sourcedataMatches(self):
1424 try:
1425 olddata = open(self.sourcedatafile, "r").read()
1426 if olddata != self.sourcedata:
1427 return False
1428 except IOError:
1429 return False
1430 return True
1432 def _handleGotRevision(self, res):
1433 d = defer.maybeDeferred(self.parseGotRevision)
1434 d.addCallback(lambda got_revision:
1435 self.sendStatus({'got_revision': got_revision}))
1436 return d
1438 def parseGotRevision(self):
1439 """Override this in a subclass. It should return a string that
1440 represents which revision was actually checked out, or a Deferred
1441 that will fire with such a string. If, in a future build, you were to
1442 pass this 'got_revision' string in as the 'revision' component of a
1443 SourceStamp, you should wind up with the same source code as this
1444 checkout just obtained.
1446 It is probably most useful to scan self.command.stdout for a string
1447 of some sort. Be sure to set keepStdout=True on the VC command that
1448 you run, so that you'll have something available to look at.
1450 If this information is unavailable, just return None."""
1452 return None
1454 def writeSourcedata(self, res):
1455 open(self.sourcedatafile, "w").write(self.sourcedata)
1456 return res
1458 def sourcedirIsUpdateable(self):
1459 raise NotImplementedError("this must be implemented in a subclass")
1461 def doVCUpdate(self):
1462 raise NotImplementedError("this must be implemented in a subclass")
1464 def doVCFull(self):
1465 raise NotImplementedError("this must be implemented in a subclass")
1467 def maybeDoVCFallback(self, rc):
1468 if type(rc) is int and rc == 0:
1469 return rc
1470 if self.interrupted:
1471 raise AbandonChain(1)
1472 msg = "update failed, clobbering and trying again"
1473 self.sendStatus({'header': msg + "\n"})
1474 log.msg(msg)
1475 d = self.doClobber(None, self.srcdir)
1476 d.addCallback(self.doVCFallback2)
1477 return d
1479 def doVCFallback2(self, res):
1480 msg = "now retrying VC operation"
1481 self.sendStatus({'header': msg + "\n"})
1482 log.msg(msg)
1483 d = self.doVCFull()
1484 d.addBoth(self.maybeDoVCRetry)
1485 d.addCallback(self._abandonOnFailure)
1486 return d
1488 def maybeDoVCRetry(self, res):
1489 """We get here somewhere after a VC chain has finished. res could
1490 be::
1492 - 0: the operation was successful
1493 - nonzero: the operation failed. retry if possible
1494 - AbandonChain: the operation failed, someone else noticed. retry.
1495 - Failure: some other exception, re-raise
"""
1498 if isinstance(res, failure.Failure):
1499 if self.interrupted:
1500 return res # don't re-try interrupted builds
1501 res.trap(AbandonChain)
1502 else:
1503 if type(res) is int and res == 0:
1504 return res
1505 if self.interrupted:
1506 raise AbandonChain(1)
1507 # if we get here, we should retry, if possible
1508 if self.retry:
1509 delay, repeats = self.retry
1510 if repeats >= 0:
1511 self.retry = (delay, repeats-1)
1512 msg = ("update failed, trying %d more times after %d seconds"
1513 % (repeats, delay))
1514 self.sendStatus({'header': msg + "\n"})
1515 log.msg(msg)
1516 d = defer.Deferred()
1517 self.maybeClobber(d)
1518 d.addCallback(lambda res: self.doVCFull())
1519 d.addBoth(self.maybeDoVCRetry)
1520 reactor.callLater(delay, d.callback, None)
1521 return d
1522 return res
1524 def doClobber(self, dummy, dirname):
1525 # TODO: remove the old tree in the background
1526 ## workdir = os.path.join(self.builder.basedir, self.workdir)
1527 ## deaddir = self.workdir + ".deleting"
1528 ## if os.path.isdir(workdir):
1529 ## try:
1530 ## os.rename(workdir, deaddir)
1531 ## # might fail if deaddir already exists: previous deletion
1532 ## # hasn't finished yet
1533 ## # start the deletion in the background
1534 ## # TODO: there was a solaris/NetApp/NFS problem where a
1535 ## # process that was still running out of the directory we're
1536 ## # trying to delete could prevent the rm-rf from working. I
1537 ## # think it stalled the rm, but maybe it just died with
1538 ## # permission issues. Try to detect this.
1539 ## os.commands("rm -rf %s &" % deaddir)
1540 ## except:
1541 ## # fall back to sequential delete-then-checkout
1542 ## pass
1543 d = os.path.join(self.builder.basedir, dirname)
1544 if runtime.platformType != "posix":
1545 # if we're running on w32, use rmtree instead. It will block,
1546 # but hopefully it won't take too long.
1547 rmdirRecursive(d)
1548 return defer.succeed(0)
1549 command = ["rm", "-rf", d]
1550 c = ShellCommand(self.builder, command, self.builder.basedir,
1551 sendRC=0, timeout=self.timeout, usePTY=False)
1553 self.command = c
1554 # sendRC=0 means the rm command will send stdout/stderr to the
1555 # master, but not the rc=0 when it finishes. That job is left to
1556 # _sendRC
1557 d = c.start()
1558 d.addCallback(self._abandonOnFailure)
1559 return d
1561 def doCopy(self, res):
1562 # now copy tree to workdir
1563 fromdir = os.path.join(self.builder.basedir, self.srcdir)
1564 todir = os.path.join(self.builder.basedir, self.workdir)
1565 if runtime.platformType != "posix":
1566 self.sendStatus({'header': "Since we're on a non-POSIX platform, "
1567 "we're not going to try to execute cp in a subprocess, but instead "
1568 "use shutil.copytree(), which will block until it is complete. "
1569 "fromdir: %s, todir: %s\n" % (fromdir, todir)})
1570 shutil.copytree(fromdir, todir)
1571 return defer.succeed(0)
1573 if not os.path.exists(os.path.dirname(todir)):
1574 os.makedirs(os.path.dirname(todir))
1575 if os.path.exists(todir):
1576 # I don't think this happens, but just in case..
1577 log.msg("cp target '%s' already exists -- cp will not do what you think!" % todir)
1579 command = ['cp', '-R', '-P', '-p', fromdir, todir]
1580 c = ShellCommand(self.builder, command, self.builder.basedir,
1581 sendRC=False, timeout=self.timeout, usePTY=False)
1582 self.command = c
1583 d = c.start()
1584 d.addCallback(self._abandonOnFailure)
1585 return d
1587 def doPatch(self, res):
1588 patchlevel, diff = self.patch
1589 command = [getCommand("patch"), '-p%d' % patchlevel]
1590 dir = os.path.join(self.builder.basedir, self.workdir)
1591 # mark the directory so we don't try to update it later
1592 open(os.path.join(dir, ".buildbot-patched"), "w").write("patched\n")
1593 # now apply the patch
1594 c = ShellCommand(self.builder, command, dir,
1595 sendRC=False, timeout=self.timeout,
1596 initialStdin=diff, usePTY=False)
1597 self.command = c
1598 d = c.start()
1599 d.addCallback(self._abandonOnFailure)
1600 return d
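# Illustrative sketch (not part of the original file): the three methods a
# concrete VC class must provide on top of SourceBase, shown for an imaginary
# "examplevc" tool.  Compare the real CVS/SVN/Darcs classes that follow; a
# real class would also be registered with registerSlaveCommand.
class _ExampleVC(SourceBase):
    header = "examplevc operation"

    def setup(self, args):
        SourceBase.setup(self, args)
        self.vcexe = getCommand("examplevc")   # hypothetical executable
        self.repourl = args['repourl']
        self.sourcedata = "%s\n" % self.repourl

    def sourcedirIsUpdateable(self):
        return os.path.isdir(os.path.join(self.builder.basedir,
                                          self.srcdir, ".examplevc"))

    def doVCUpdate(self):
        d = os.path.join(self.builder.basedir, self.srcdir)
        c = ShellCommand(self.builder, [self.vcexe, "update"], d,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        return c.start()

    def doVCFull(self):
        c = ShellCommand(self.builder,
                         [self.vcexe, "clone", self.repourl, self.srcdir],
                         self.builder.basedir,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        return c.start()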
1603 class CVS(SourceBase):
1604 """CVS-specific VC operation. In addition to the arguments handled by
1605 SourceBase, this command reads the following keys:
1607 ['cvsroot'] (required): the CVSROOT repository string
1608 ['cvsmodule'] (required): the module to be retrieved
1609 ['branch']: a '-r' tag or branch name to use for the checkout/update
1610 ['login']: a string for use as a password to 'cvs login'
1611 ['global_options']: a list of strings to use before the CVS verb
"""
1614 header = "cvs operation"
1616 def setup(self, args):
1617 SourceBase.setup(self, args)
1618 self.vcexe = getCommand("cvs")
1619 self.cvsroot = args['cvsroot']
1620 self.cvsmodule = args['cvsmodule']
1621 self.global_options = args.get('global_options', [])
1622 self.branch = args.get('branch')
1623 self.login = args.get('login')
1624 self.sourcedata = "%s\n%s\n%s\n" % (self.cvsroot, self.cvsmodule,
1625 self.branch)
1627 def sourcedirIsUpdateable(self):
1628 if os.path.exists(os.path.join(self.builder.basedir,
1629 self.srcdir, ".buildbot-patched")):
1630 return False
1631 return os.path.isdir(os.path.join(self.builder.basedir,
1632 self.srcdir, "CVS"))
1634 def start(self):
1635 if self.login is not None:
1636 # need to do a 'cvs login' command first
1637 d = self.builder.basedir
1638 command = ([self.vcexe, '-d', self.cvsroot] + self.global_options
1639 + ['login'])
1640 c = ShellCommand(self.builder, command, d,
1641 sendRC=False, timeout=self.timeout,
1642 initialStdin=self.login+"\n", usePTY=False)
1643 self.command = c
1644 d = c.start()
1645 d.addCallback(self._abandonOnFailure)
1646 d.addCallback(self._didLogin)
1647 return d
1648 else:
1649 return self._didLogin(None)
1651 def _didLogin(self, res):
1652 # now we really start
1653 return SourceBase.start(self)
1655 def doVCUpdate(self):
1656 d = os.path.join(self.builder.basedir, self.srcdir)
1657 command = [self.vcexe, '-z3'] + self.global_options + ['update', '-dP']
1658 if self.branch:
1659 command += ['-r', self.branch]
1660 if self.revision:
1661 command += ['-D', self.revision]
1662 c = ShellCommand(self.builder, command, d,
1663 sendRC=False, timeout=self.timeout, usePTY=False)
1664 self.command = c
1665 return c.start()
1667 def doVCFull(self):
1668 d = self.builder.basedir
1669 if self.mode == "export":
1670 verb = "export"
1671 else:
1672 verb = "checkout"
1673 command = ([self.vcexe, '-d', self.cvsroot, '-z3'] +
1674 self.global_options +
1675 [verb, '-d', self.srcdir])
1676 if self.branch:
1677 command += ['-r', self.branch]
1678 if self.revision:
1679 command += ['-D', self.revision]
1680 command += [self.cvsmodule]
1681 c = ShellCommand(self.builder, command, d,
1682 sendRC=False, timeout=self.timeout, usePTY=False)
1683 self.command = c
1684 return c.start()
1686 def parseGotRevision(self):
1687 # CVS does not have any kind of revision stamp to speak of. We return
1688 # the current timestamp as a best-effort guess, but this depends upon
1689 # the local system having a clock that is
1690 # reasonably-well-synchronized with the repository.
1691 return time.strftime("%Y-%m-%d %H:%M:%S +0000", time.gmtime())
1693 registerSlaveCommand("cvs", CVS, command_version)
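# Illustrative sketch (not part of the original file): the kind of args dict
# the master sends for a source command, using the keys documented in
# SourceBase plus the CVS-specific ones above.  All values are hypothetical.
_example_cvs_args = {
    'workdir': "build",
    'mode': "update",
    'revision': None,    # or a '-D' style timestamp string
    'patch': None,       # or (patchlevel, unified_diff_text)
    'timeout': 120,
    'retry': (10, 2),    # retry after a 10 second delay, up to 2 more times
    'cvsroot': ":pserver:anonymous@cvs.example.org:/cvsroot/project",
    'cvsmodule': "project",
    'branch': None,
}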
1695 class SVN(SourceBase):
1696 """Subversion-specific VC operation. In addition to the arguments
1697 handled by SourceBase, this command reads the following keys:
1699 ['svnurl'] (required): the SVN repository string
1700 ['username'] Username passed to the svn command
1701 ['password'] Password passed to the svn command
"""
1704 header = "svn operation"
1706 def setup(self, args):
1707 SourceBase.setup(self, args)
1708 self.vcexe = getCommand("svn")
1709 self.svnurl = args['svnurl']
1710 self.sourcedata = "%s\n" % self.svnurl
1712 self.extra_args = []
1713 if args.has_key('username'):
1714 self.extra_args.extend(["--username", args['username']])
1715 if args.has_key('password'):
1716 self.extra_args.extend(["--password", Obfuscated(args['password'], "XXXX")])
1718 def sourcedirIsUpdateable(self):
1719 if os.path.exists(os.path.join(self.builder.basedir,
1720 self.srcdir, ".buildbot-patched")):
1721 return False
1722 return os.path.isdir(os.path.join(self.builder.basedir,
1723 self.srcdir, ".svn"))
1725 def doVCUpdate(self):
1726 revision = self.args['revision'] or 'HEAD'
1727 # update: possible for mode in ('copy', 'update')
1728 d = os.path.join(self.builder.basedir, self.srcdir)
1729 command = [self.vcexe, 'update'] + \
1730 self.extra_args + \
1731 ['--revision', str(revision),
1732 '--non-interactive', '--no-auth-cache']
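# --non-interactive keeps svn from prompting (e.g. for credentials or
# certificate acceptance); --no-auth-cache stops it from writing
# credentials to the slave's on-disk auth cache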
1733 c = ShellCommand(self.builder, command, d,
1734 sendRC=False, timeout=self.timeout,
1735 keepStdout=True, usePTY=False)
1736 self.command = c
1737 return c.start()
1739 def doVCFull(self):
1740 revision = self.args['revision'] or 'HEAD'
1741 d = self.builder.basedir
1742 if self.mode == "export":
1743 command = [self.vcexe, 'export'] + \
1744 self.extra_args + \
1745 ['--revision', str(revision),
1746 '--non-interactive', '--no-auth-cache',
1747 self.svnurl, self.srcdir]
1748 else:
1749 # mode=='clobber', or copy/update on a broken workspace
1750 command = [self.vcexe, 'checkout'] + \
1751 self.extra_args + \
1752 ['--revision', str(revision),
1753 '--non-interactive', '--no-auth-cache',
1754 self.svnurl, self.srcdir]
1755 c = ShellCommand(self.builder, command, d,
1756 sendRC=False, timeout=self.timeout,
1757 keepStdout=True, usePTY=False)
1758 self.command = c
1759 return c.start()
1761 def getSvnVersionCommand(self):
"""
1763 Get the (shell) command used to determine SVN revision number
1764 of checked-out code
1766 return: list of strings, passable as the command argument to ShellCommand
"""
1768 # svn checkout operations finish with 'Checked out revision 16657.'
1769 # svn update operations finish with the line 'At revision 16654.'
1770 # But we don't use those. Instead, run 'svnversion'.
1771 svnversion_command = getCommand("svnversion")
1772 # older versions of 'svnversion' (1.1.4) require the WC_PATH
1773 # argument, newer ones (1.3.1) do not.
1774 return [svnversion_command, "."]
1776 def parseGotRevision(self):
1777 c = ShellCommand(self.builder,
1778 self.getSvnVersionCommand(),
1779 os.path.join(self.builder.basedir, self.srcdir),
1780 environ=self.env,
1781 sendStdout=False, sendStderr=False, sendRC=False,
1782 keepStdout=True, usePTY=False)
1783 d = c.start()
1784 def _parse(res):
1785 r_raw = c.stdout.strip()
1786 # Extract revision from the version "number" string
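# svnversion prints e.g. '4168', '4123:4168', '4168M', or '4123:4168MS'
# (or 'exported' for a non-working-copy); strip the M/S flags and take
# the last number of a mixed-revision range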
1787 r = r_raw.rstrip('MS')
1788 r = r.split(':')[-1]
1789 got_version = None
1790 try:
1791 got_version = int(r)
1792 except ValueError:
1793 msg =("SVN.parseGotRevision unable to parse output "
1794 "of svnversion: '%s'" % r_raw)
1795 log.msg(msg)
1796 self.sendStatus({'header': msg + "\n"})
1797 return got_version
1798 d.addCallback(_parse)
1799 return d
1802 registerSlaveCommand("svn", SVN, command_version)
1804 class Darcs(SourceBase):
1805 """Darcs-specific VC operation. In addition to the arguments
1806 handled by SourceBase, this command reads the following keys:
1808 ['repourl'] (required): the Darcs repository string
"""
1811 header = "darcs operation"
1813 def setup(self, args):
1814 SourceBase.setup(self, args)
1815 self.vcexe = getCommand("darcs")
1816 self.repourl = args['repourl']
1817 self.sourcedata = "%s\n" % self.repourl
1818 self.revision = self.args.get('revision')
1820 def sourcedirIsUpdateable(self):
1821 if os.path.exists(os.path.join(self.builder.basedir,
1822 self.srcdir, ".buildbot-patched")):
1823 return False
1824 if self.revision:
1825 # checking out a specific revision requires a full 'darcs get'
1826 return False
1827 return os.path.isdir(os.path.join(self.builder.basedir,
1828 self.srcdir, "_darcs"))
1830 def doVCUpdate(self):
1831 assert not self.revision
1832 # update: possible for mode in ('copy', 'update')
1833 d = os.path.join(self.builder.basedir, self.srcdir)
1834 command = [self.vcexe, 'pull', '--all', '--verbose']
1835 c = ShellCommand(self.builder, command, d,
1836 sendRC=False, timeout=self.timeout, usePTY=False)
1837 self.command = c
1838 return c.start()
1840 def doVCFull(self):
1841 # checkout or export
1842 d = self.builder.basedir
1843 command = [self.vcexe, 'get', '--verbose', '--partial',
1844 '--repo-name', self.srcdir]
1845 if self.revision:
1846 # write the context to a file
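# (the Darcs 'revision' is a full context file, as produced by
# 'darcs changes --context'; 'darcs get --context FILE' recreates that
# exact set of patches)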
1847 n = os.path.join(self.builder.basedir, ".darcs-context")
1848 f = open(n, "wb")
1849 f.write(self.revision)
1850 f.close()
1851 # tell Darcs to use that context
1852 command.append('--context')
1853 command.append(n)
1854 command.append(self.repourl)
1856 c = ShellCommand(self.builder, command, d,
1857 sendRC=False, timeout=self.timeout, usePTY=False)
1858 self.command = c
1859 d = c.start()
1860 if self.revision:
1861 d.addCallback(self.removeContextFile, n)
1862 return d
1864 def removeContextFile(self, res, n):
1865 os.unlink(n)
1866 return res
1868 def parseGotRevision(self):
1869 # we use 'darcs changes --context' to find out what we wound up with
1870 command = [self.vcexe, "changes", "--context"]
1871 c = ShellCommand(self.builder, command,
1872 os.path.join(self.builder.basedir, self.srcdir),
1873 environ=self.env,
1874 sendStdout=False, sendStderr=False, sendRC=False,
1875 keepStdout=True, usePTY=False)
1876 d = c.start()
1877 d.addCallback(lambda res: c.stdout)
1878 return d
1880 registerSlaveCommand("darcs", Darcs, command_version)
1882 class Monotone(SourceBase):
1883 """Monotone-specific VC operation. In addition to the arguments handled
1884 by SourceBase, this command reads the following keys:
1886 ['server_addr'] (required): the address of the server to pull from
1887 ['branch'] (required): the branch the revision is on
1888 ['db_path'] (required): the local database path to use
1889 ['revision'] (required): the revision to check out
1890 ['monotone'] (required): path to monotone executable
"""
1893 header = "monotone operation"
1895 def setup(self, args):
1896 SourceBase.setup(self, args)
1897 self.server_addr = args["server_addr"]
1898 self.branch = args["branch"]
1899 self.db_path = args["db_path"]
1900 self.revision = args["revision"]
1901 self.monotone = args["monotone"]
1902 self._made_fulls = False
1903 self._pull_timeout = args["timeout"]
1905 def _makefulls(self):
1906 if not self._made_fulls:
1907 basedir = self.builder.basedir
1908 self.full_db_path = os.path.join(basedir, self.db_path)
1909 self.full_srcdir = os.path.join(basedir, self.srcdir)
1910 self._made_fulls = True
1912 def sourcedirIsUpdateable(self):
1913 self._makefulls()
1914 if os.path.exists(os.path.join(self.full_srcdir,
1915 ".buildbot_patched")):
1916 return False
1917 return (os.path.isfile(self.full_db_path)
1918 and os.path.isdir(os.path.join(self.full_srcdir, "MT")))
1920 def doVCUpdate(self):
1921 return self._withFreshDb(self._doUpdate)
1923 def _doUpdate(self):
1924 # update: possible for mode in ('copy', 'update')
1925 command = [self.monotone, "update",
1926 "-r", self.revision,
1927 "-b", self.branch]
1928 c = ShellCommand(self.builder, command, self.full_srcdir,
1929 sendRC=False, timeout=self.timeout, usePTY=False)
1930 self.command = c
1931 return c.start()
1933 def doVCFull(self):
1934 return self._withFreshDb(self._doFull)
1936 def _doFull(self):
1937 command = [self.monotone, "--db=" + self.full_db_path,
1938 "checkout",
1939 "-r", self.revision,
1940 "-b", self.branch,
1941 self.full_srcdir]
1942 c = ShellCommand(self.builder, command, self.builder.basedir,
1943 sendRC=False, timeout=self.timeout, usePTY=False)
1944 self.command = c
1945 return c.start()
1947 def _withFreshDb(self, callback):
1948 self._makefulls()
1949 # first ensure the db exists and is usable
1950 if os.path.isfile(self.full_db_path):
1951 # already exists, so run 'db migrate' in case monotone has been
1952 # upgraded under us
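# ('db migrate' should be a no-op when the schema is already current,
# so it is safe to run on every build)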
1953 command = [self.monotone, "db", "migrate",
1954 "--db=" + self.full_db_path]
1955 else:
1956 # We'll be doing an initial pull, so up the timeout to 3 hours to
1957 # make sure it will have time to complete.
1958 self._pull_timeout = max(self._pull_timeout, 3 * 60 * 60)
1959 self.sendStatus({"header": "creating database %s\n"
1960 % (self.full_db_path,)})
1961 command = [self.monotone, "db", "init",
1962 "--db=" + self.full_db_path]
1963 c = ShellCommand(self.builder, command, self.builder.basedir,
1964 sendRC=False, timeout=self.timeout, usePTY=False)
1965 self.command = c
1966 d = c.start()
1967 d.addCallback(self._abandonOnFailure)
1968 d.addCallback(self._didDbInit)
1969 d.addCallback(self._didPull, callback)
1970 return d
1972 def _didDbInit(self, res):
1973 command = [self.monotone, "--db=" + self.full_db_path,
1974 "pull", "--ticker=dot", self.server_addr, self.branch]
1975 c = ShellCommand(self.builder, command, self.builder.basedir,
1976 sendRC=False, timeout=self._pull_timeout, usePTY=False)
1977 self.sendStatus({"header": "pulling %s from %s\n"
1978 % (self.branch, self.server_addr)})
1979 self.command = c
1980 return c.start()
1982 def _didPull(self, res, callback):
1983 return callback()
1985 registerSlaveCommand("monotone", Monotone, command_version)
1988 class Git(SourceBase):
1989 """Git specific VC operation. In addition to the arguments
1990 handled by SourceBase, this command reads the following keys:
1992 ['repourl'] (required): the upstream GIT repository string
1993 ['branch'] (optional): which version (i.e. branch or tag) to
1994 retrieve. Default: "master".
"""
1997 header = "git operation"
1999 def setup(self, args):
2000 SourceBase.setup(self, args)
2001 self.repourl = args['repourl']
2002 self.branch = args.get('branch')
2003 if not self.branch:
2004 self.branch = "master"
2005 self.sourcedata = "%s %s\n" % (self.repourl, self.branch)
2007 def _fullSrcdir(self):
2008 return os.path.join(self.builder.basedir, self.srcdir)
2010 def _commitSpec(self):
2011 if self.revision:
2012 return self.revision
2013 return self.branch
2015 def sourcedirIsUpdateable(self):
2016 if os.path.exists(os.path.join(self._fullSrcdir(),
2017 ".buildbot-patched")):
2018 return False
2019 return os.path.isdir(os.path.join(self._fullSrcdir(), ".git"))
2021 def readSourcedata(self):
2022 return open(self.sourcedatafile, "r").read()
2024 # If the repourl matches the one recorded in the sourcedata file,
2025 # we consider the sourcedata a match. Branch changes can be
2026 # ignored, since Git can have many branches fetched into a single
2027 # repository, and doVCUpdate deals with a changed branch
2028 # properly.
2029 def sourcedataMatches(self):
2030 try:
2031 olddata = self.readSourcedata()
2032 if not olddata.startswith(self.repourl+' '):
2033 return False
2034 except IOError:
2035 return False
2036 return True
2038 def _didFetch(self, res):
2039 if self.revision:
2040 head = self.revision
2041 else:
2042 head = 'FETCH_HEAD'
2044 command = ['git', 'reset', '--hard', head]
2045 c = ShellCommand(self.builder, command, self._fullSrcdir(),
2046 sendRC=False, timeout=self.timeout, usePTY=False)
2047 self.command = c
2048 return c.start()
2050 # Update first runs "git clean", removing local changes,
2051 # if the branch to be checked out has changed. This, combined
2052 # with the later "git reset", is equivalent to clobbering the repo,
2053 # but it's much more efficient.
2054 def doVCUpdate(self):
2055 try:
2056 # Check to see if our branch has changed
2057 diffbranch = self.sourcedata != self.readSourcedata()
2058 except IOError:
2059 diffbranch = False
2060 if diffbranch:
2061 command = ['git', 'clean', '-f', '-d', '-x']
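# -f: force removal, -d: also remove untracked directories,
# -x: remove ignored files as well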
2062 c = ShellCommand(self.builder, command, self._fullSrcdir(),
2063 sendRC=False, timeout=self.timeout, usePTY=False)
2064 self.command = c
2065 d = c.start()
2066 d.addCallback(self._abandonOnFailure)
2067 d.addCallback(self._didClean)
2068 return d
2069 return self._didClean(None)
2071 def _didClean(self, dummy):
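# fetch the wanted branch (and tags, via -t) from repourl; the result is
# left in FETCH_HEAD, which _didFetch then resets to unless an explicit
# revision was requested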
2072 command = ['git', 'fetch', '-t', self.repourl, self.branch]
2073 self.sendStatus({"header": "fetching branch %s from %s\n"
2074 % (self.branch, self.repourl)})
2075 c = ShellCommand(self.builder, command, self._fullSrcdir(),
2076 sendRC=False, timeout=self.timeout, usePTY=False)
2077 self.command = c
2078 d = c.start()
2079 d.addCallback(self._abandonOnFailure)
2080 d.addCallback(self._didFetch)
2081 return d
2083 def _didInit(self, res):
2084 return self.doVCUpdate()
2086 def doVCFull(self):
2087 os.mkdir(self._fullSrcdir())
2088 c = ShellCommand(self.builder, ['git', 'init'], self._fullSrcdir(),
2089 sendRC=False, timeout=self.timeout, usePTY=False)
2090 self.command = c
2091 d = c.start()
2092 d.addCallback(self._abandonOnFailure)
2093 d.addCallback(self._didInit)
2094 return d
2096 def parseGotRevision(self):
2097 command = ['git', 'rev-parse', 'HEAD']
2098 c = ShellCommand(self.builder, command, self._fullSrcdir(),
2099 sendRC=False, keepStdout=True, usePTY=False)
2100 d = c.start()
2101 def _parse(res):
2102 hash = c.stdout.strip()
2103 if len(hash) != 40:
2104 return None
2105 return hash
2106 d.addCallback(_parse)
2107 return d
2109 registerSlaveCommand("git", Git, command_version)
2111 class Arch(SourceBase):
2112 """Arch-specific (tla-specific) VC operation. In addition to the
2113 arguments handled by SourceBase, this command reads the following keys:
2115 ['url'] (required): the repository string
2116 ['version'] (required): which version (i.e. branch) to retrieve
2117 ['revision'] (optional): the 'patch-NN' argument to check out
2118 ['archive']: the archive name to use. If None, use the archive's default
2119 ['build-config']: if present, give to 'tla build-config' after checkout
"""
2122 header = "arch operation"
2123 buildconfig = None
2125 def setup(self, args):
2126 SourceBase.setup(self, args)
2127 self.vcexe = getCommand("tla")
2128 self.archive = args.get('archive')
2129 self.url = args['url']
2130 self.version = args['version']
2131 self.revision = args.get('revision')
2132 self.buildconfig = args.get('build-config')
2133 self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
2134 self.buildconfig)
2136 def sourcedirIsUpdateable(self):
2137 if self.revision:
2138 # Arch cannot roll a directory backwards, so if they ask for a
2139 # specific revision, clobber the directory. Technically this
2140 # could be limited to the cases where the requested revision is
2141 # later than our current one, but it's too hard to extract the
2142 # current revision from the tree.
2143 return False
2144 if os.path.exists(os.path.join(self.builder.basedir,
2145 self.srcdir, ".buildbot-patched")):
2146 return False
2147 return os.path.isdir(os.path.join(self.builder.basedir,
2148 self.srcdir, "{arch}"))
2150 def doVCUpdate(self):
2151 # update: possible for mode in ('copy', 'update')
2152 d = os.path.join(self.builder.basedir, self.srcdir)
2153 command = [self.vcexe, 'replay']
2154 if self.revision:
2155 command.append(self.revision)
2156 c = ShellCommand(self.builder, command, d,
2157 sendRC=False, timeout=self.timeout, usePTY=False)
2158 self.command = c
2159 return c.start()
2161 def doVCFull(self):
2162 # to do a checkout, we must first "register" the archive by giving
2163 # the URL to tla, which will go to the repository at that URL and
2164 # figure out the archive name. tla will tell you the archive name
2165 # when it is done, and all further actions must refer to this name.
2167 command = [self.vcexe, 'register-archive', '--force', self.url]
2168 c = ShellCommand(self.builder, command, self.builder.basedir,
2169 sendRC=False, keepStdout=True,
2170 timeout=self.timeout, usePTY=False)
2171 self.command = c
2172 d = c.start()
2173 d.addCallback(self._abandonOnFailure)
2174 d.addCallback(self._didRegister, c)
2175 return d
2177 def _didRegister(self, res, c):
2178 # find out what tla thinks the archive name is. If the user told us
2179 # to use something specific, make sure it matches.
2180 r = re.search(r'Registering archive: (\S+)\s*$', c.stdout)
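# (on success tla prints a line of the form
#  'Registering archive: lord@emf.net--2004' -- archive name illustrative)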
2181 if r:
2182 msg = "tla reports archive name is '%s'" % r.group(1)
2183 log.msg(msg)
2184 self.builder.sendUpdate({'header': msg+"\n"})
2185 if self.archive and r.group(1) != self.archive:
2186 msg = (" mismatch, we wanted an archive named '%s'"
2187 % self.archive)
2188 log.msg(msg)
2189 self.builder.sendUpdate({'header': msg+"\n"})
2190 raise AbandonChain(-1)
2191 self.archive = r.group(1)
2192 assert self.archive, "need archive name to continue"
2193 return self._doGet()
2195 def _doGet(self):
2196 ver = self.version
2197 if self.revision:
2198 ver += "--%s" % self.revision
2199 command = [self.vcexe, 'get', '--archive', self.archive,
2200 '--no-pristine',
2201 ver, self.srcdir]
2202 c = ShellCommand(self.builder, command, self.builder.basedir,
2203 sendRC=False, timeout=self.timeout, usePTY=False)
2204 self.command = c
2205 d = c.start()
2206 d.addCallback(self._abandonOnFailure)
2207 if self.buildconfig:
2208 d.addCallback(self._didGet)
2209 return d
2211 def _didGet(self, res):
2212 d = os.path.join(self.builder.basedir, self.srcdir)
2213 command = [self.vcexe, 'build-config', self.buildconfig]
2214 c = ShellCommand(self.builder, command, d,
2215 sendRC=False, timeout=self.timeout, usePTY=False)
2216 self.command = c
2217 d = c.start()
2218 d.addCallback(self._abandonOnFailure)
2219 return d
2221 def parseGotRevision(self):
2222 # using code from tryclient.TlaExtractor
2223 # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
2224 # 'tla logs' gives us REVISION
2225 command = [self.vcexe, "logs", "--full", "--reverse"]
2226 c = ShellCommand(self.builder, command,
2227 os.path.join(self.builder.basedir, self.srcdir),
2228 environ=self.env,
2229 sendStdout=False, sendStderr=False, sendRC=False,
2230 keepStdout=True, usePTY=False)
2231 d = c.start()
2232 def _parse(res):
2233 tid = c.stdout.split("\n")[0].strip()
2234 slash = tid.index("/")
2235 dd = tid.rindex("--")
2236 #branch = tid[slash+1:dd]
2237 baserev = tid[dd+2:]
2238 return baserev
2239 d.addCallback(_parse)
2240 return d
2242 registerSlaveCommand("arch", Arch, command_version)
2244 class Bazaar(Arch):
2245 """Bazaar (/usr/bin/baz) is an alternative client for Arch repositories.
2246 It is mostly option-compatible, but archive registration is different
2247 enough to warrant a separate Command.
2249 ['archive'] (required): the name of the archive being used
"""
2252 def setup(self, args):
2253 Arch.setup(self, args)
2254 self.vcexe = getCommand("baz")
2255 # baz doesn't emit the repository name after registration (and
2256 # grepping through the output of 'baz archives' is too hard), so we
2257 # require the buildmaster configuration to provide both the
2258 # archive name and the URL.
2259 self.archive = args['archive'] # required for Baz
2260 self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
2261 self.buildconfig)
2263 # in _didRegister, the regexp won't match, so we'll stick with the name
2264 # in self.archive
2266 def _doGet(self):
2267 # baz prefers ARCHIVE/VERSION. This will work even if
2268 # my-default-archive is not set.
2269 ver = self.archive + "/" + self.version
2270 if self.revision:
2271 ver += "--%s" % self.revision
2272 command = [self.vcexe, 'get', '--no-pristine',
2273 ver, self.srcdir]
2274 c = ShellCommand(self.builder, command, self.builder.basedir,
2275 sendRC=False, timeout=self.timeout, usePTY=False)
2276 self.command = c
2277 d = c.start()
2278 d.addCallback(self._abandonOnFailure)
2279 if self.buildconfig:
2280 d.addCallback(self._didGet)
2281 return d
2283 def parseGotRevision(self):
2284 # using code from tryclient.BazExtractor
2285 command = [self.vcexe, "tree-id"]
2286 c = ShellCommand(self.builder, command,
2287 os.path.join(self.builder.basedir, self.srcdir),
2288 environ=self.env,
2289 sendStdout=False, sendStderr=False, sendRC=False,
2290 keepStdout=True, usePTY=False)
2291 d = c.start()
2292 def _parse(res):
2293 tid = c.stdout.strip()
2294 slash = tid.index("/")
2295 dd = tid.rindex("--")
2296 #branch = tid[slash+1:dd]
2297 baserev = tid[dd+2:]
2298 return baserev
2299 d.addCallback(_parse)
2300 return d
2302 registerSlaveCommand("bazaar", Bazaar, command_version)
2305 class Bzr(SourceBase):
2306 """bzr-specific VC operation. In addition to the arguments
2307 handled by SourceBase, this command reads the following keys:
2309 ['repourl'] (required): the Bzr repository string
"""
2312 header = "bzr operation"
2314 def setup(self, args):
2315 SourceBase.setup(self, args)
2316 self.vcexe = getCommand("bzr")
2317 self.repourl = args['repourl']
2318 self.sourcedata = "%s\n" % self.repourl
2319 self.revision = self.args.get('revision')
2321 def sourcedirIsUpdateable(self):
2322 if os.path.exists(os.path.join(self.builder.basedir,
2323 self.srcdir, ".buildbot-patched")):
2324 return False
2325 if self.revision:
2326 # checking out a specific revision requires a full 'bzr checkout'
2327 return False
2328 return os.path.isdir(os.path.join(self.builder.basedir,
2329 self.srcdir, ".bzr"))
2331 def doVCUpdate(self):
2332 assert not self.revision
2333 # update: possible for mode in ('copy', 'update')
2334 srcdir = os.path.join(self.builder.basedir, self.srcdir)
2335 command = [self.vcexe, 'update']
2336 c = ShellCommand(self.builder, command, srcdir,
2337 sendRC=False, timeout=self.timeout, usePTY=False)
2338 self.command = c
2339 return c.start()
2341 def doVCFull(self):
2342 # checkout or export
2343 d = self.builder.basedir
2344 if self.mode == "export":
2345 # exporting in bzr requires a separate directory
2346 return self.doVCExport()
2347 # originally I added --lightweight here, but then 'bzr revno' is
2348 # wrong. The revno reported in 'bzr version-info' is correct,
2349 # however. Maybe this is a bzr bug?
2351 # In addition, you cannot perform a 'bzr update' on a repo pulled
2352 # from an HTTP repository that used 'bzr checkout --lightweight'. You
2353 # get an "ERROR: Cannot lock: transport is read only" when you try.
2355 # So I won't bother using --lightweight for now.
2357 command = [self.vcexe, 'checkout']
2358 if self.revision:
2359 command.append('--revision')
2360 command.append(str(self.revision))
2361 command.append(self.repourl)
2362 command.append(self.srcdir)
2364 c = ShellCommand(self.builder, command, d,
2365 sendRC=False, timeout=self.timeout, usePTY=False)
2366 self.command = c
2367 d = c.start()
2368 return d
2370 def doVCExport(self):
2371 tmpdir = os.path.join(self.builder.basedir, "export-temp")
2372 srcdir = os.path.join(self.builder.basedir, self.srcdir)
2373 command = [self.vcexe, 'checkout', '--lightweight']
2374 if self.revision:
2375 command.append('--revision')
2376 command.append(str(self.revision))
2377 command.append(self.repourl)
2378 command.append(tmpdir)
2379 c = ShellCommand(self.builder, command, self.builder.basedir,
2380 sendRC=False, timeout=self.timeout, usePTY=False)
2381 self.command = c
2382 d = c.start()
2383 def _export(res):
2384 command = [self.vcexe, 'export', srcdir]
2385 c = ShellCommand(self.builder, command, tmpdir,
2386 sendRC=False, timeout=self.timeout, usePTY=False)
2387 self.command = c
2388 return c.start()
2389 d.addCallback(_export)
2390 return d
2392 def get_revision_number(self, out):
2393 # it feels like 'bzr revno' sometimes gives different results than
2394 # the 'revno:' line from 'bzr version-info', and the one from
2395 # version-info is more likely to be correct.
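# 'bzr version-info' output contains a line like 'revno: 1234'; pull out
# that integer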
2396 for line in out.split("\n"):
2397 colon = line.find(":")
2398 if colon != -1:
2399 key, value = line[:colon], line[colon+2:]
2400 if key == "revno":
2401 return int(value)
2402 raise ValueError("unable to find revno: in bzr output: '%s'" % out)
2404 def parseGotRevision(self):
2405 command = [self.vcexe, "version-info"]
2406 c = ShellCommand(self.builder, command,
2407 os.path.join(self.builder.basedir, self.srcdir),
2408 environ=self.env,
2409 sendStdout=False, sendStderr=False, sendRC=False,
2410 keepStdout=True, usePTY=False)
2411 d = c.start()
2412 def _parse(res):
2413 try:
2414 return self.get_revision_number(c.stdout)
2415 except ValueError:
2416 msg =("Bzr.parseGotRevision unable to parse output "
2417 "of bzr version-info: '%s'" % c.stdout.strip())
2418 log.msg(msg)
2419 self.sendStatus({'header': msg + "\n"})
2420 return None
2421 d.addCallback(_parse)
2422 return d
2424 registerSlaveCommand("bzr", Bzr, command_version)
2426 class Mercurial(SourceBase):
2427 """Mercurial specific VC operation. In addition to the arguments
2428 handled by SourceBase, this command reads the following keys:
2430 ['repourl'] (required): the Mercurial repository string
"""
2433 header = "mercurial operation"
2435 def setup(self, args):
2436 SourceBase.setup(self, args)
2437 self.vcexe = getCommand("hg")
2438 self.repourl = args['repourl']
2439 self.clobberOnBranchChange = args['clobberOnBranchChange']
2440 self.sourcedata = "%s\n" % self.repourl
2441 self.stdout = ""
2442 self.stderr = ""
2444 def sourcedirIsUpdateable(self):
2445 return os.path.isdir(os.path.join(self.builder.basedir,
2446 self.srcdir, ".hg"))
2448 def doVCUpdate(self):
2449 d = os.path.join(self.builder.basedir, self.srcdir)
2450 command = [self.vcexe, 'pull', '--verbose', self.repourl]
2451 c = ShellCommand(self.builder, command, d,
2452 sendRC=False, timeout=self.timeout,
2453 keepStdout=True, usePTY=False)
2454 self.command = c
2455 d = c.start()
2456 d.addCallback(self._handleEmptyUpdate)
2457 d.addCallback(self._update)
2458 return d
2460 def _handleEmptyUpdate(self, res):
2461 if type(res) is int and res == 1:
2462 if self.command.stdout.find("no changes found") != -1:
2463 # 'hg pull', when it doesn't have anything to do, exits with
2464 # rc=1, and there appears to be no way to shut this off. It
2465 # emits a distinctive message to stdout, though. So catch
2466 # this and pretend that it completed successfully.
2467 return 0
2468 return res
2470 def doVCFull(self):
2471 d = os.path.join(self.builder.basedir, self.srcdir)
2472 command = [self.vcexe, 'clone', '--verbose', '--noupdate', self.repourl, d]
2473 c = ShellCommand(self.builder, command, self.builder.basedir,
2474 sendRC=False, timeout=self.timeout, usePTY=False)
2475 self.command = c
2476 cmd1 = c.start()
2477 cmd1.addCallback(self._update)
2478 return cmd1
2480 def _clobber(self, dummy, dirname):
2481 def _vcfull(res):
2482 return self.doVCFull()
2484 c = self.doClobber(dummy, dirname)
2485 c.addCallback(_vcfull)
2487 return c
2489 def _purge(self, dummy, dirname):
2490 d = os.path.join(self.builder.basedir, self.srcdir)
2491 purge = [self.vcexe, 'purge', '--all']
2492 purgeCmd = ShellCommand(self.builder, purge, d,
2493 sendStdout=False, sendStderr=False,
2494 keepStdout=True, keepStderr=True, usePTY=False)
2496 def _clobber(res):
2497 if res != 0:
2498 # purge failed, we need to switch to a classic clobber
2499 msg = "'hg purge' failed: %s\n%s. Clobbering." % (purgeCmd.stdout, purgeCmd.stderr)
2500 self.sendStatus({'header': msg + "\n"})
2501 log.msg(msg)
2503 return self._clobber(dummy, dirname)
2505 # purge succeeded, so go on with the update
2506 return self._update2(res)
2508 p = purgeCmd.start()
2509 p.addCallback(_clobber)
2510 return p
2512 def _update(self, res):
2513 if res != 0:
2514 return res
2516 # compare the current in-repo branch to the branch requested for this update
2517 self.update_branch = self.args.get('branch', 'default')
2519 d = os.path.join(self.builder.basedir, self.srcdir)
2520 parentscmd = [self.vcexe, 'identify', '--num', '--branch']
2521 cmd = ShellCommand(self.builder, parentscmd, d,
2522 sendStdout=False, sendStderr=False,
2523 keepStdout=True, keepStderr=True, usePTY=False)
2525 self.clobber = None
2527 def _parseIdentify(res):
2528 if res != 0:
2529 msg = "'hg identify' failed: %s\n%s" % (cmd.stdout, cmd.stderr)
2530 self.sendStatus({'header': msg + "\n"})
2531 log.msg(msg)
2532 return res
2534 log.msg('Output: %s' % cmd.stdout)
2536 match = re.search(r'^(.+) (.+)$', cmd.stdout)
2537 assert match
2539 rev = match.group(1)
2540 current_branch = match.group(2)
2542 if rev == '-1':
2543 msg = "Fresh hg repo, don't worry about in-repo branch name"
2544 log.msg(msg)
2546 elif os.path.exists(os.path.join(self.builder.basedir,
2547 self.srcdir, ".buildbot-patched")):
2548 self.clobber = self._purge
2550 elif self.update_branch != current_branch:
2551 msg = "Working dir is on in-repo branch '%s' and build needs '%s'." % (current_branch, self.update_branch)
2552 if self.clobberOnBranchChange:
2553 msg += ' Clobbering.'
2554 else:
2555 msg += ' Updating.'
2557 self.sendStatus({'header': msg + "\n"})
2558 log.msg(msg)
2560 # Clobbers only if clobberOnBranchChange is set
2561 if self.clobberOnBranchChange:
2562 self.clobber = self._purge
2564 else:
2565 msg = "Working dir on same in-repo branch as build (%s)." % (current_branch)
2566 log.msg(msg)
2568 return 0
2570 def _checkRepoURL(res):
2571 parentscmd = [self.vcexe, 'paths', 'default']
2572 cmd2 = ShellCommand(self.builder, parentscmd, d,
2573 sendStdout=False, sendStderr=False,
2574 keepStdout=True, keepStderr=True, usePTY=False)
2576 def _parseRepoURL(res):
2577 if res == 1:
2578 if "not found!" == cmd2.stderr.strip():
2579 msg = "hg default path not set. Not checking repo url for clobber test"
2580 log.msg(msg)
2581 return 0
2582 else:
2583 msg = "'hg paths default' failed: %s\n%s" % (cmd2.stdout, cmd2.stderr)
2584 log.msg(msg)
2585 return 1
2587 oldurl = cmd2.stdout.strip()
2589 log.msg("Repo cloned from: '%s'" % oldurl)
2591 if sys.platform == "win32":
2592 oldurl = oldurl.lower().replace('\\', '/')
2593 repourl = self.repourl.lower().replace('\\', '/')
2594 else:
2595 repourl = self.repourl
2597 if oldurl != repourl:
2598 self.clobber = self._clobber
2599 msg = "RepoURL changed from '%s' in wc to '%s' in update. Clobbering" % (oldurl, repourl)
2600 log.msg(msg)
2602 return 0
2604 c = cmd2.start()
2605 c.addCallback(_parseRepoURL)
2606 return c
2608 def _maybeClobber(res):
2609 if self.clobber:
2610 msg = "Clobber flag set. Clobbering."
2611 log.msg(msg)
2613 def _vcfull(res):
2614 return self.doVCFull()
2616 return self.clobber(None, self.srcdir)
2618 return 0
2620 c = cmd.start()
2621 c.addCallback(_parseIdentify)
2622 c.addCallback(_checkRepoURL)
2623 c.addCallback(_maybeClobber)
2624 c.addCallback(self._update2)
2625 return c
2627 def _update2(self, res):
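# update the working copy to the requested revision (or the branch tip)
# with --clean, which discards any local modifications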
2628 d = os.path.join(self.builder.basedir, self.srcdir)
2630 updatecmd = [self.vcexe, 'update', '--clean', '--repository', d]
2631 if self.args.get('revision'):
2632 updatecmd.extend(['--rev', self.args['revision']])
2633 else:
2634 updatecmd.extend(['--rev', self.args.get('branch', 'default')])
2635 self.command = ShellCommand(self.builder, updatecmd,
2636 self.builder.basedir, sendRC=False,
2637 timeout=self.timeout, usePTY=False)
2638 return self.command.start()
2640 def parseGotRevision(self):
2641 # we use 'hg identify' to find out what we wound up with
2642 command = [self.vcexe, "identify"]
2643 c = ShellCommand(self.builder, command,
2644 os.path.join(self.builder.basedir, self.srcdir),
2645 environ=self.env,
2646 sendStdout=False, sendStderr=False, sendRC=False,
2647 keepStdout=True, usePTY=False)
2648 d = c.start()
2649 def _parse(res):
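# 'hg identify' prints the short changeset hash first (optionally
# followed by '+' for local changes and then any tag names), e.g.
# '9ae3f28cf588 default' -- hash illustrative; \w+ captures just the hash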
2650 m = re.search(r'^(\w+)', c.stdout)
2651 return m.group(1)
2652 d.addCallback(_parse)
2653 return d
2655 registerSlaveCommand("hg", Mercurial, command_version)
2658 class P4Base(SourceBase):
2659 """Base class for P4 source-updaters
2661 ['p4port'] (required): host:port for server to access
2662 ['p4user'] (optional): user to use for access
2663 ['p4passwd'] (optional): passwd to try for the user
2664 ['p4client'] (optional): client spec to use
"""
2666 def setup(self, args):
2667 SourceBase.setup(self, args)
2668 self.p4port = args['p4port']
2669 self.p4client = args['p4client']
2670 self.p4user = args['p4user']
2671 self.p4passwd = args['p4passwd']
2673 def parseGotRevision(self):
2674 # Executes a p4 command that will give us the latest changelist number
2675 # of any file under the current (or default) client:
2676 command = ['p4']
2677 if self.p4port:
2678 command.extend(['-p', self.p4port])
2679 if self.p4user:
2680 command.extend(['-u', self.p4user])
2681 if self.p4passwd:
2682 command.extend(['-P', self.p4passwd])
2683 if self.p4client:
2684 command.extend(['-c', self.p4client])
2685 command.extend(['changes', '-m', '1', '#have'])
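# 'changes -m 1' limits the output to the single most recent changelist;
# the '#have' revision spec restricts it to revisions already synced into
# this client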
2686 c = ShellCommand(self.builder, command, self.builder.basedir,
2687 environ=self.env, timeout=self.timeout,
2688 sendStdout=True, sendStderr=False, sendRC=False,
2689 keepStdout=True, usePTY=False)
2690 self.command = c
2691 d = c.start()
2693 def _parse(res):
2694 # 'p4 -c client-name changes -m 1 "#have"' will produce output like:
2695 # "Change 28147 on 2008/04/07 by p4user@hostname..."
2696 # The number after "Change" is the one we want.
2697 m = re.match(r'Change\s+(\d+)\s+', c.stdout)
2698 if m:
2699 return m.group(1)
2700 return None
2701 d.addCallback(_parse)
2702 return d
2705 class P4(P4Base):
2706 """A P4 source-updater.
2708 ['p4port'] (required): host:port for server to access
2709 ['p4user'] (optional): user to use for access
2710 ['p4passwd'] (optional): passwd to try for the user
2711 ['p4client'] (optional): client spec to use
2712 ['p4extra_views'] (optional): additional client views to use
"""
2715 header = "p4"
2717 def setup(self, args):
2718 P4Base.setup(self, args)
2719 self.p4base = args['p4base']
2720 self.p4extra_views = args['p4extra_views']
2721 self.p4mode = args['mode']
2722 self.p4branch = args['branch']
2724 self.sourcedata = str([
2725 # Perforce server.
2726 self.p4port,
2728 # Client spec.
2729 self.p4client,
2731 # Depot side of view spec.
2732 self.p4base,
2733 self.p4branch,
2734 self.p4extra_views,
2736 # Local side of view spec (srcdir is made from these).
2737 self.builder.basedir,
2738 self.mode,
2739 self.workdir
])
2743 def sourcedirIsUpdateable(self):
2744 if os.path.exists(os.path.join(self.builder.basedir,
2745 self.srcdir, ".buildbot-patched")):
2746 return False
2747 # We assume our client spec is still around.
2748 # We just say we aren't updateable if the dir doesn't exist so we
2749 # don't get ENOENT checking the sourcedata.
2750 return os.path.isdir(os.path.join(self.builder.basedir,
2751 self.srcdir))
2753 def doVCUpdate(self):
2754 return self._doP4Sync(force=False)
2756 def _doP4Sync(self, force):
2757 command = ['p4']
2759 if self.p4port:
2760 command.extend(['-p', self.p4port])
2761 if self.p4user:
2762 command.extend(['-u', self.p4user])
2763 if self.p4passwd:
2764 command.extend(['-P', self.p4passwd])
2765 if self.p4client:
2766 command.extend(['-c', self.p4client])
2767 command.extend(['sync'])
2768 if force:
2769 command.extend(['-f'])
2770 if self.revision:
2771 command.extend(['@' + str(self.revision)])
2772 env = {}
2773 c = ShellCommand(self.builder, command, self.builder.basedir,
2774 environ=env, sendRC=False, timeout=self.timeout,
2775 keepStdout=True, usePTY=False)
2776 self.command = c
2777 d = c.start()
2778 d.addCallback(self._abandonOnFailure)
2779 return d
2782 def doVCFull(self):
2783 env = {}
2784 command = ['p4']
2785 client_spec = ''
2786 client_spec += "Client: %s\n\n" % self.p4client
2787 client_spec += "Owner: %s\n\n" % self.p4user
2788 client_spec += "Description:\n\tCreated by %s\n\n" % self.p4user
2789 client_spec += "Root:\t%s\n\n" % self.builder.basedir
2790 client_spec += "Options:\tallwrite rmdir\n\n"
2791 client_spec += "LineEnd:\tlocal\n\n"
2793 # Set up the client view
2794 client_spec += "View:\n\t%s" % (self.p4base)
2795 if self.p4branch:
2796 client_spec += "%s/" % (self.p4branch)
2797 client_spec += "... //%s/%s/...\n" % (self.p4client, self.srcdir)
2798 if self.p4extra_views:
2799 for k, v in self.p4extra_views:
2800 client_spec += "\t%s/... //%s/%s%s/...\n" % (k, self.p4client,
2801 self.srcdir, v)
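# For illustration only (names here are made up), the spec fed to
# 'p4 client -i' below ends up looking roughly like:
#   Client: buildbot-client
#   Owner: buildbotuser
#   Root:  /home/buildslave/builder
#   View:
#       //depot/proj/trunk/... //buildbot-client/source/...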
2802 if self.p4port:
2803 command.extend(['-p', self.p4port])
2804 if self.p4user:
2805 command.extend(['-u', self.p4user])
2806 if self.p4passwd:
2807 command.extend(['-P', self.p4passwd])
2808 command.extend(['client', '-i'])
2809 log.msg(client_spec)
2810 c = ShellCommand(self.builder, command, self.builder.basedir,
2811 environ=env, sendRC=False, timeout=self.timeout,
2812 initialStdin=client_spec, usePTY=False)
2813 self.command = c
2814 d = c.start()
2815 d.addCallback(self._abandonOnFailure)
2816 d.addCallback(lambda _: self._doP4Sync(force=True))
2817 return d
2819 registerSlaveCommand("p4", P4, command_version)
2822 class P4Sync(P4Base):
2823 """A partial P4 source-updater. Requires manual setup of a per-slave P4
2824 environment. The only thing which comes from the master is P4PORT.
2825 'mode' is required to be 'copy'.
2827 ['p4port'] (required): host:port for server to access
2828 ['p4user'] (optional): user to use for access
2829 ['p4passwd'] (optional): passwd to try for the user
2830 ['p4client'] (optional): client spec to use
"""
2833 header = "p4 sync"
2835 def setup(self, args):
2836 P4Base.setup(self, args)
2837 self.vcexe = getCommand("p4")
2839 def sourcedirIsUpdateable(self):
2840 return True
2842 def _doVC(self, force):
2843 d = os.path.join(self.builder.basedir, self.srcdir)
2844 command = [self.vcexe]
2845 if self.p4port:
2846 command.extend(['-p', self.p4port])
2847 if self.p4user:
2848 command.extend(['-u', self.p4user])
2849 if self.p4passwd:
2850 command.extend(['-P', self.p4passwd])
2851 if self.p4client:
2852 command.extend(['-c', self.p4client])
2853 command.extend(['sync'])
2854 if force:
2855 command.extend(['-f'])
2856 if self.revision:
2857 command.extend(['@' + self.revision])
2858 env = {}
2859 c = ShellCommand(self.builder, command, d, environ=env,
2860 sendRC=False, timeout=self.timeout, usePTY=False)
2861 self.command = c
2862 return c.start()
2864 def doVCUpdate(self):
2865 return self._doVC(force=False)
2867 def doVCFull(self):
2868 return self._doVC(force=True)
2870 registerSlaveCommand("p4sync", P4Sync, command_version)