initial support for Monotone, by Nathaniel Smith
buildbot/slave/commands.py (buildbot.git)
1 # -*- test-case-name: buildbot.test.test_slavecommand -*-
3 import os, re, signal, shutil, types, time
4 from stat import ST_CTIME, ST_MTIME, ST_SIZE
6 from twisted.internet.protocol import ProcessProtocol
7 from twisted.internet import reactor, defer, task
8 from twisted.python import log, failure, runtime
10 from buildbot.twcompat import implements, which
11 from buildbot.slave.interfaces import ISlaveCommand
12 from buildbot.slave.registry import registerSlaveCommand
14 # this used to be a CVS $-style "Revision" auto-updated keyword, but since I
15 # moved to Darcs as the primary repository, this is updated manually each
16 # time this file is changed. The last cvs_ver that was here was 1.51 .
17 command_version = "2.2"
19 # version history:
20 # >=1.17: commands are interruptible
21 # >=1.28: Arch understands 'revision', added Bazaar
22 # >=1.33: Source classes understand 'retry'
23 # >=1.39: Source classes correctly handle changes in branch (except Git)
24 # Darcs accepts 'revision' (now all do but Git) (well, and P4Sync)
25 # Arch/Baz should accept 'build-config'
26 # >=1.51: (release 0.7.3)
27 # >= 2.1: SlaveShellCommand now accepts 'initial_stdin', 'keep_stdin_open',
28 # and 'logfiles'. It now sends 'log' messages in addition to
29 # stdout/stderr/header/rc. It acquired writeStdin/closeStdin methods,
30 # but these are not remotely callable yet.
31 # (not externally visible: ShellCommandPP has writeStdin/closeStdin.
32 # ShellCommand accepts new arguments (logfiles=, initialStdin=,
33 # keepStdinOpen=) and no longer accepts stdin=)
34 # (release 0.7.4)
35 # >= 2.2: added monotone, uploadFile, and downloadFile
37 class CommandInterrupted(Exception):
38 pass
39 class TimeoutError(Exception):
40 pass
42 class AbandonChain(Exception):
43 """A series of chained steps can raise this exception to indicate that
44 one of the intermediate ShellCommands has failed, such that there is no
45 point in running the remainder. 'rc' should be the non-zero exit code of
46 the failing ShellCommand."""
48 def __repr__(self):
49 return "<AbandonChain rc=%s>" % self.args[0]
51 def getCommand(name):
52 possibles = which(name)
53 if not possibles:
54 raise RuntimeError("Couldn't find executable for '%s'" % name)
55 return possibles[0]
57 def rmdirRecursive(dir):
58 """This is a replacement for shutil.rmtree that works better under
59 windows. Thanks to Bear at the OSAF for the code."""
60 if not os.path.exists(dir):
61 return
63 if os.path.islink(dir):
64 os.remove(dir)
65 return
67 for name in os.listdir(dir):
68 full_name = os.path.join(dir, name)
69 # on Windows, if we don't have write permission we can't remove
70 # the file/directory either, so turn that on
71 if os.name == 'nt':
72 if not os.access(full_name, os.W_OK):
73 os.chmod(full_name, 0600)
74 if os.path.isdir(full_name):
75 rmdirRecursive(full_name)
76 else:
77 # print "removing file", full_name
78 os.remove(full_name)
79 os.rmdir(dir)
81 class ShellCommandPP(ProcessProtocol):
82 debug = False
84 def __init__(self, command):
85 self.command = command
86 self.pending_stdin = ""
87 self.stdin_finished = False
89 def writeStdin(self, data):
90 assert not self.stdin_finished
91 if self.connected:
92 self.transport.write(data)
93 else:
94 self.pending_stdin += data
96 def closeStdin(self):
97 if self.connected:
98 if self.debug: log.msg(" closing stdin")
99 self.transport.closeStdin()
100 self.stdin_finished = True
102 def connectionMade(self):
103 if self.debug:
104 log.msg("ShellCommandPP.connectionMade")
105 if not self.command.process:
106 if self.debug:
107 log.msg(" assigning self.command.process: %s" %
108 (self.transport,))
109 self.command.process = self.transport
111 # TODO: maybe we shouldn't close stdin when using a PTY. I can't test
112 # this yet, recent debian glibc has a bug which causes thread-using
113 # test cases to SIGHUP trial, and the workaround is to either run
114 # the whole test with /bin/sh -c " ".join(argv) (way gross) or to
115 # not use a PTY. Once the bug is fixed, I'll be able to test what
116 # happens when you close stdin on a pty. My concern is that it will
117 # SIGHUP the child (since we are, in a sense, hanging up on them).
118 # But it may well be that keeping stdout open prevents the SIGHUP
119 # from being sent.
120 #if not self.command.usePTY:
122 if self.pending_stdin:
123 if self.debug: log.msg(" writing to stdin")
124 self.transport.write(self.pending_stdin)
125 if self.stdin_finished:
126 if self.debug: log.msg(" closing stdin")
127 self.transport.closeStdin()
129 def outReceived(self, data):
130 if self.debug:
131 log.msg("ShellCommandPP.outReceived")
132 self.command.addStdout(data)
134 def errReceived(self, data):
135 if self.debug:
136 log.msg("ShellCommandPP.errReceived")
137 self.command.addStderr(data)
139 def processEnded(self, status_object):
140 if self.debug:
141 log.msg("ShellCommandPP.processEnded", status_object)
142 # status_object is a Failure wrapped around an
143 # error.ProcessTerminated or an error.ProcessDone.
144 # requires twisted >= 1.0.4 to overcome a bug in process.py
145 sig = status_object.value.signal
146 rc = status_object.value.exitCode
147 self.command.finished(sig, rc)
149 class LogFileWatcher:
150 POLL_INTERVAL = 2
152 def __init__(self, command, name, logfile):
153 self.command = command
154 self.name = name
155 self.logfile = logfile
156 log.msg("LogFileWatcher created to watch %s" % logfile)
157 # we are created before the ShellCommand starts. If the logfile we're
158 # supposed to be watching already exists, record its size and
159 # ctime/mtime so we can tell when it starts to change.
160 self.old_logfile_stats = self.statFile()
161 self.started = False
163 # every 2 seconds we check on the file again
164 self.poller = task.LoopingCall(self.poll)
166 def start(self):
167 self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)
169 def _cleanupPoll(self, err):
170 log.err(err, msg="Polling error")
171 self.poller = None
173 def stop(self):
174 self.poll()
175 if self.poller is not None:
176 self.poller.stop()
178 def statFile(self):
179 if os.path.exists(self.logfile):
180 s = os.stat(self.logfile)
181 return (s[ST_CTIME], s[ST_MTIME], s[ST_SIZE])
182 return None
184 def poll(self):
185 if not self.started:
186 s = self.statFile()
187 if s == self.old_logfile_stats:
188 return # not started yet
189 if not s:
190 # the file was there, but now it's deleted. Forget about the
191 # initial state, clearly the process has deleted the logfile
192 # in preparation for creating a new one.
193 self.old_logfile_stats = None
194 return # no file to work with
195 self.f = open(self.logfile, "rb")
196 self.started = True
197 while True:
198 data = self.f.read(10000)
199 if not data:
200 return
201 self.command.addLogfile(self.name, data)
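# Illustrative sketch (not part of the original file): ShellCommand (below)
# creates one LogFileWatcher per entry in its 'logfiles' dict, roughly like
# this. The log file name used here is hypothetical.
def _example_watch_logfile(command, workdir):
    w = LogFileWatcher(command, "test.log",
                       os.path.join(workdir, "_trial_temp/test.log"))
    w.start()   # poll every POLL_INTERVAL seconds, forwarding new data
    return w    # call w.stop() when the process ends to flush the tail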
204 class ShellCommand:
205 # This is a helper class, used by SlaveCommands to run programs in a
206 # child shell.
208 notreally = False
209 BACKUP_TIMEOUT = 5
210 KILL = "KILL"
212 def __init__(self, builder, command,
213 workdir, environ=None,
214 sendStdout=True, sendStderr=True, sendRC=True,
215 timeout=None, initialStdin=None, keepStdinOpen=False,
216 keepStdout=False,
217 logfiles={}):
220 @param keepStdout: if True, we keep a copy of all the stdout text
221 that we've seen. This copy is available in
222 self.stdout, which can be read after the command
223 has finished.
227 self.builder = builder
228 self.command = command
229 self.sendStdout = sendStdout
230 self.sendStderr = sendStderr
231 self.sendRC = sendRC
232 self.logfiles = logfiles
233 self.workdir = workdir
234 self.environ = os.environ.copy()
235 if environ:
236 if environ.has_key('PYTHONPATH'):
237 ppath = environ['PYTHONPATH']
238 # Need to do os.pathsep translation. We could either do that
239 # by replacing all incoming ':'s with os.pathsep, or by
240 # accepting lists. I like lists better.
241 if not isinstance(ppath, str):
242 # If it's not a string, treat it as a sequence to be
243 # turned into a string.
244 ppath = os.pathsep.join(ppath)
246 if self.environ.has_key('PYTHONPATH'):
247 # special case, prepend the builder's items to the
248 # existing ones. This will break if you send over empty
249 # strings, so don't do that.
250 ppath = ppath + os.pathsep + self.environ['PYTHONPATH']
252 environ['PYTHONPATH'] = ppath
254 self.environ.update(environ)
255 self.initialStdin = initialStdin
256 self.keepStdinOpen = keepStdinOpen
257 self.timeout = timeout
258 self.timer = None
259 self.keepStdout = keepStdout
261 # usePTY=True is a convenience for cleaning up all children and
262 # grandchildren of a hung command. Fall back to usePTY=False on
263 # systems where ptys cause problems.
265 self.usePTY = self.builder.usePTY
266 if runtime.platformType != "posix":
267 self.usePTY = False # PTYs are posix-only
268 if initialStdin is not None:
269 # for .closeStdin to matter, we must use a pipe, not a PTY
270 self.usePTY = False
272 self.logFileWatchers = []
273 for name,filename in self.logfiles.items():
274 w = LogFileWatcher(self, name,
275 os.path.join(self.workdir, filename))
276 self.logFileWatchers.append(w)
278 def __repr__(self):
279 return "<slavecommand.ShellCommand '%s'>" % self.command
281 def sendStatus(self, status):
282 self.builder.sendUpdate(status)
284 def start(self):
285 # return a Deferred which fires (with the exit code) when the command
286 # completes
287 if self.keepStdout:
288 self.stdout = ""
289 self.deferred = defer.Deferred()
290 try:
291 self._startCommand()
292 except:
293 log.msg("error in ShellCommand._startCommand")
294 log.err()
295 # pretend it was a shell error
296 self.deferred.errback(AbandonChain(-1))
297 return self.deferred
299 def _startCommand(self):
300 log.msg("ShellCommand._startCommand")
301 if self.notreally:
302 self.sendStatus({'header': "command '%s' in dir %s" % \
303 (self.command, self.workdir)})
304 self.sendStatus({'header': "(not really)\n"})
305 self.finished(None, 0)
306 return
308 self.pp = ShellCommandPP(self)
310 if type(self.command) in types.StringTypes:
311 if runtime.platformType == 'win32':
312 argv = [os.environ['COMSPEC'], '/c', self.command]
313 else:
314 # for posix, use /bin/sh. for other non-posix, well, doesn't
315 # hurt to try
316 argv = ['/bin/sh', '-c', self.command]
317 else:
318 if runtime.platformType == 'win32':
319 argv = [os.environ['COMSPEC'], '/c'] + list(self.command)
320 else:
321 argv = self.command
323 # self.stdin is handled in ShellCommandPP.connectionMade
325 # first header line is the command in plain text, argv joined with
326 # spaces. You should be able to cut-and-paste this into a shell to
327 # obtain the same results. If there are spaces in the arguments, too
328 # bad.
329 msg = " ".join(argv)
330 log.msg(" " + msg)
331 self.sendStatus({'header': msg+"\n"})
333 # then comes the secondary information
334 msg = " in dir %s" % (self.workdir,)
335 if self.timeout:
336 msg += " (timeout %d secs)" % (self.timeout,)
337 log.msg(" " + msg)
338 self.sendStatus({'header': msg+"\n"})
340 msg = " watching logfiles %s" % (self.logfiles,)
341 log.msg(" " + msg)
342 self.sendStatus({'header': msg+"\n"})
344 # then the argv array, to resolve any ambiguity
345 msg = " argv: %s" % (argv,)
346 log.msg(" " + msg)
347 self.sendStatus({'header': msg+"\n"})
349 # then the environment, since it sometimes causes problems
350 msg = " environment: %s" % (self.environ,)
351 log.msg(" " + msg)
352 self.sendStatus({'header': msg+"\n"})
354 # this will be buffered until connectionMade is called
355 if self.initialStdin:
356 self.pp.writeStdin(self.initialStdin)
357 if not self.keepStdinOpen:
358 self.pp.closeStdin()
360 # win32eventreactor's spawnProcess (under twisted <= 2.0.1) returns
361 # None, as opposed to all the posixbase-derived reactors (which
362 # return the new Process object). This is a nuisance. We can make up
363 # for it by having the ProcessProtocol give us their .transport
364 # attribute after they get one. I'd prefer to get it from
365 # spawnProcess because I'm concerned about returning from this method
366 # without having a valid self.process to work with. (if kill() were
367 # called right after we return, but somehow before connectionMade
368 # were called, then kill() would blow up).
369 self.process = None
370 p = reactor.spawnProcess(self.pp, argv[0], argv,
371 self.environ,
372 self.workdir,
373 usePTY=self.usePTY)
374 # connectionMade might have been called during spawnProcess
375 if not self.process:
376 self.process = p
378 # connectionMade also closes stdin as long as we're not using a PTY.
379 # This is intended to kill off inappropriately interactive commands
380 # better than the (long) hung-command timeout. ProcessPTY should be
381 # enhanced to allow the same childFDs argument that Process takes,
382 # which would let us connect stdin to /dev/null .
384 if self.timeout:
385 self.timer = reactor.callLater(self.timeout, self.doTimeout)
387 for w in self.logFileWatchers:
388 w.start()
391 def addStdout(self, data):
392 if self.sendStdout:
393 self.sendStatus({'stdout': data})
394 if self.keepStdout:
395 self.stdout += data
396 if self.timer:
397 self.timer.reset(self.timeout)
399 def addStderr(self, data):
400 if self.sendStderr:
401 self.sendStatus({'stderr': data})
402 if self.timer:
403 self.timer.reset(self.timeout)
405 def addLogfile(self, name, data):
406 self.sendStatus({'log': (name, data)})
407 if self.timer:
408 self.timer.reset(self.timeout)
410 def finished(self, sig, rc):
411 log.msg("command finished with signal %s, exit code %s" % (sig,rc))
412 for w in self.logFileWatchers:
413 # this will send the final updates
414 w.stop()
415 if sig is not None:
416 rc = -1
417 if self.sendRC:
418 if sig is not None:
419 self.sendStatus(
420 {'header': "process killed by signal %d\n" % sig})
421 self.sendStatus({'rc': rc})
422 if self.timer:
423 self.timer.cancel()
424 self.timer = None
425 d = self.deferred
426 self.deferred = None
427 if d:
428 d.callback(rc)
429 else:
430 log.msg("Hey, command %s finished twice" % self)
432 def failed(self, why):
433 log.msg("ShellCommand.failed: command failed: %s" % (why,))
434 if self.timer:
435 self.timer.cancel()
436 self.timer = None
437 d = self.deferred
438 self.deferred = None
439 if d:
440 d.errback(why)
441 else:
442 log.msg("Hey, command %s finished twice" % self)
444 def doTimeout(self):
445 self.timer = None
446 msg = "command timed out: %d seconds without output" % self.timeout
447 self.kill(msg)
449 def kill(self, msg):
450 # This may be called by the timeout, or when the user has decided to
451 # abort this build.
452 if self.timer:
453 self.timer.cancel()
454 self.timer = None
455 if hasattr(self.process, "pid"):
456 msg += ", killing pid %d" % self.process.pid
457 log.msg(msg)
458 self.sendStatus({'header': "\n" + msg + "\n"})
460 hit = 0
461 if runtime.platformType == "posix":
462 try:
463 # really want to kill off all child processes too. Process
464 # Groups are ideal for this, but that requires
465 # spawnProcess(usePTY=1). Try both ways in case process was
466 # not started that way.
468 # the test suite sets self.KILL=None to tell us we should
469 # only pretend to kill the child. This lets us test the
470 # backup timer.
472 sig = None
473 if self.KILL is not None:
474 sig = getattr(signal, "SIG"+ self.KILL, None)
476 if self.KILL == None:
477 log.msg("self.KILL==None, only pretending to kill child")
478 elif sig is None:
479 log.msg("signal module is missing SIG%s" % self.KILL)
480 elif not hasattr(os, "kill"):
481 log.msg("os module is missing the 'kill' function")
482 else:
483 log.msg("trying os.kill(-pid, %d)" % (sig,))
484 # TODO: maybe use os.killpg instead of a negative pid?
485 os.kill(-self.process.pid, sig)
486 log.msg(" signal %s sent successfully" % sig)
487 hit = 1
488 except OSError:
489 # probably no-such-process, maybe because there is no process
490 # group
491 pass
492 if not hit:
493 try:
494 if self.KILL is None:
495 log.msg("self.KILL==None, only pretending to kill child")
496 else:
497 log.msg("trying process.signalProcess('KILL')")
498 self.process.signalProcess(self.KILL)
499 log.msg(" signal %s sent successfully" % (self.KILL,))
500 hit = 1
501 except OSError:
502 # could be no-such-process, because they finished very recently
503 pass
504 if not hit:
505 log.msg("signalProcess/os.kill failed both times")
507 if runtime.platformType == "posix":
508 # we only do this under posix because the win32eventreactor
509 # blocks here until the process has terminated, while closing
510 # stderr. This is weird.
511 self.pp.transport.loseConnection()
513 # finished ought to be called momentarily. Just in case it doesn't,
514 # set a timer which will abandon the command.
515 self.timer = reactor.callLater(self.BACKUP_TIMEOUT,
516 self.doBackupTimeout)
518 def doBackupTimeout(self):
519 log.msg("we tried to kill the process, and it wouldn't die.."
520 " finish anyway")
521 self.timer = None
522 self.sendStatus({'header': "SIGKILL failed to kill process\n"})
523 if self.sendRC:
524 self.sendStatus({'header': "using fake rc=-1\n"})
525 self.sendStatus({'rc': -1})
526 self.failed(TimeoutError("SIGKILL failed to kill process"))
529 def writeStdin(self, data):
530 self.pp.writeStdin(data)
532 def closeStdin(self):
533 self.pp.closeStdin()
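# Illustrative sketch (not part of the original file): how a SlaveCommand
# typically drives ShellCommand. 'builder' stands for the parent SlaveBuilder;
# the command line, workdir, and PYTHONPATH entries are hypothetical.
def _example_run_make(builder, workdir):
    # PYTHONPATH may be passed as a list; __init__ joins it with os.pathsep
    # and prepends it to any PYTHONPATH already in the slave's environment.
    c = ShellCommand(builder, ["make", "all"], workdir,
                     environ={'PYTHONPATH': ['lib', 'src']},
                     timeout=600, keepStdout=True)
    d = c.start()  # Deferred that fires with the exit code
    def check(rc):
        if rc != 0:
            raise AbandonChain(rc)
        return c.stdout  # captured because keepStdout=True
    d.addCallback(check)
    return d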
536 class Command:
537 if implements:
538 implements(ISlaveCommand)
539 else:
540 __implements__ = ISlaveCommand
542 """This class defines one command that can be invoked by the build master.
543 The command is executed on the slave side, and always sends back a
544 completion message when it finishes. It may also send intermediate status
545 as it runs (by calling builder.sendStatus). Some commands can be
546 interrupted (either by the build master or a local timeout), in which
547 case the step is expected to complete normally with a status message that
548 indicates an error occurred.
550 These commands are used by BuildSteps on the master side. Each kind of
551 BuildStep uses a single Command. The slave must implement all the
552 Commands required by the set of BuildSteps used for any given build:
553 this is checked at startup time.
555 All Commands are constructed with the same signature:
556 c = CommandClass(builder, args)
557 where 'builder' is the parent SlaveBuilder object, and 'args' is a
558 dict that is interpreted per-command.
560 The setup(args) method is available for setup, and is run from __init__.
562 The Command is started with start(). This method must be implemented in a
563 subclass, and it should return a Deferred. When your step is done, you
564 should fire the Deferred (the results are not used). If the command is
565 interrupted, it should fire the Deferred anyway.
567 While the command runs, it may send status messages back to the
568 buildmaster by calling self.sendStatus(statusdict). The statusdict is
569 interpreted by the master-side BuildStep however it likes.
571 A separate completion message is sent when the deferred fires, which
572 indicates that the Command has finished, but does not carry any status
573 data. If the Command needs to return an exit code of some sort, that
574 should be sent as a regular status message before the deferred is fired .
575 Once builder.commandComplete has been run, no more status messages may be
576 sent.
578 If interrupt() is called, the Command should attempt to shut down as
579 quickly as possible. Child processes should be killed, new ones should
580 not be started. The Command should send some kind of error status update,
581 then complete as usual by firing the Deferred.
583 .interrupted should be set by interrupt(), and can be tested to avoid
584 sending multiple error status messages.
586 If .running is False, the bot is shutting down (or has otherwise lost the
587 connection to the master), and should not send any status messages. This
588 is checked in Command.sendStatus .
592 # builder methods:
593 # sendStatus(dict) (zero or more)
594 # commandComplete() or commandInterrupted() (one, at end)
596 debug = False
597 interrupted = False
598 running = False # set by Builder, cleared on shutdown or when the
599 # Deferred fires
601 def __init__(self, builder, stepId, args):
602 self.builder = builder
603 self.stepId = stepId # just for logging
604 self.args = args
605 self.setup(args)
607 def setup(self, args):
608 """Override this in a subclass to extract items from the args dict."""
609 pass
611 def doStart(self):
612 self.running = True
613 d = defer.maybeDeferred(self.start)
614 d.addBoth(self.commandComplete)
615 return d
617 def start(self):
618 """Start the command. This method should return a Deferred that will
619 fire when the command has completed. The Deferred's argument will be
620 ignored.
622 This method should be overridden by subclasses."""
623 raise NotImplementedError, "You must implement this in a subclass"
625 def sendStatus(self, status):
626 """Send a status update to the master."""
627 if self.debug:
628 log.msg("sendStatus", status)
629 if not self.running:
630 log.msg("would sendStatus but not .running")
631 return
632 self.builder.sendUpdate(status)
634 def doInterrupt(self):
635 self.running = False
636 self.interrupt()
638 def interrupt(self):
639 """Override this in a subclass to allow commands to be interrupted.
640 May be called multiple times, test and set self.interrupted=True if
641 this matters."""
642 pass
644 def commandComplete(self, res):
645 self.running = False
646 return res
648 # utility methods, mostly used by SlaveShellCommand and the like
650 def _abandonOnFailure(self, rc):
651 if type(rc) is not int:
652 log.msg("weird, _abandonOnFailure was given rc=%s (%s)" % \
653 (rc, type(rc)))
654 assert isinstance(rc, int)
655 if rc != 0:
656 raise AbandonChain(rc)
657 return rc
659 def _sendRC(self, res):
660 self.sendStatus({'rc': 0})
662 def _checkAbandoned(self, why):
663 log.msg("_checkAbandoned", why)
664 why.trap(AbandonChain)
665 log.msg(" abandoning chain", why.value)
666 self.sendStatus({'rc': why.value.args[0]})
667 return None
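# Illustrative sketch (not part of the original file): a minimal Command
# subclass following the contract described in the docstring above. The name,
# the 'delay' argument, and the use of a timer are hypothetical; compare
# DummyCommand below for the real equivalent.
class _ExampleSleepCommand(Command):
    def setup(self, args):
        self.delay = args.get('delay', 1)
        self.timer = None
    def start(self):
        self.d = defer.Deferred()
        self.timer = reactor.callLater(self.delay, self._done)
        return self.d
    def _done(self):
        self.timer = None
        self.sendStatus({'rc': 0})  # send status before the Deferred fires
        self.d.callback(None)       # the callback value is ignored
    def interrupt(self):
        if self.interrupted or self.timer is None:
            return                  # already finished or already interrupted
        self.interrupted = True
        self.timer.cancel()
        self.timer = None
        self.sendStatus({'rc': 1})  # error status, then complete as usual
        self.d.callback(None)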
671 class SlaveFileUploadCommand(Command):
673 Upload a file from slave to build master
674 Arguments:
676 - ['workdir']: directory to use
677 - ['slavesrc']: name of the file to upload to the buildmaster
678 - ['writer']: object for remote writing
679 - ['maxsize']: max size (in bytes) of file to write
680 - ['blocksize']: max size for one data block
684 debug = False
686 def setup(self,args):
687 self.workdir = args['workdir']
688 self.filename = os.path.basename(args['slavesrc'])
689 self.writer = args['writer']
690 self.maxsize = args['maxsize']
691 self.blocksize = args['blocksize']
692 self.stderr = None
693 self.rc = 0
695 if self.debug:
696 log.msg('SlaveFileUploadCommand started')
698 # Open file
699 self.path = os.path.join(self.builder.basedir,
700 self.workdir,
701 self.filename)
702 try:
703 self.fp = open(self.path, 'r')
704 if self.debug:
705 log.msg('Opened %r for upload' % self.path)
706 except:
707 self.fp = None
708 self.stderr = 'Cannot open file %r for upload' % self.path
709 self.rc = 1
710 if self.debug:
711 log.msg('Cannot open file %r for upload' % self.path)
714 def start(self):
715 self.cmd = defer.Deferred()
716 reactor.callLater(0, self._writeBlock)
718 return self.cmd
720 def _writeBlock(self):
722 Write a block of data to the remote writer
724 if self.interrupted or self.fp is None:
725 if self.debug:
726 log.msg('SlaveFileUploadCommand._writeBlock(): end')
727 d = self.writer.callRemote('close')
728 d.addCallback(lambda _: self.finished())
729 return
731 length = self.blocksize
732 if self.maxsize is not None and length > self.maxsize:
733 length = self.maxsize
735 if length <= 0:
736 if self.stderr is None:
737 self.stderr = 'Maximum filesize reached, truncating file %r' \
738 % self.path
739 self.rc = 1
740 data = ''
741 else:
742 data = self.fp.read(length)
744 if self.debug:
745 log.msg('SlaveFileUploadCommand._writeBlock(): '+
746 'allowed=%d readlen=%d' % (length,len(data)))
747 if len(data) == 0:
748 d = self.writer.callRemote('close')
749 d.addCallback(lambda _: self.finished())
750 else:
751 if self.maxsize is not None:
752 self.maxsize = self.maxsize - len(data)
753 assert self.maxsize >= 0
754 d = self.writer.callRemote('write',data)
755 d.addCallback(lambda _: self._writeBlock())
758 def interrupt(self):
759 if self.debug:
760 log.msg('interrupted')
761 if self.interrupted:
762 return
763 if self.stderr is None:
764 self.stderr = 'Upload of %r interrupted' % self.path
765 self.rc = 1
766 self.interrupted = True
767 self.finished()
770 def finished(self):
771 if self.debug:
772 log.msg('finished: stderr=%r, rc=%r' % (self.stderr,self.rc))
773 if self.stderr is None:
774 self.sendStatus({'rc':self.rc})
775 else:
776 self.sendStatus({'stderr':self.stderr, 'rc':self.rc})
777 self.cmd.callback(0)
779 registerSlaveCommand("uploadFile", SlaveFileUploadCommand, command_version)
782 class SlaveFileDownloadCommand(Command):
784 Download a file from master to slave
785 Arguments:
787 - ['workdir']: directory to use
788 - ['slavedest']: name of the destination file to write on the slave
789 - ['reader']: object for remote reading
790 - ['maxsize']: max size (in bytes) of file to write
791 - ['blocksize']: max size for one data block
794 debug = False
796 def setup(self,args):
797 self.workdir = args['workdir']
798 self.filename = os.path.basename(args['slavedest'])
799 self.reader = args['reader']
800 self.maxsize = args['maxsize']
801 self.blocksize = args['blocksize']
802 self.stderr = None
803 self.rc = 0
805 if self.debug:
806 log.msg('SlaveFileDownloadCommand started')
808 # Open file
809 self.path = os.path.join(self.builder.basedir,
810 self.workdir,
811 self.filename)
812 try:
813 self.fp = open(self.path, 'w')
814 if self.debug:
815 log.msg('Opened %r for download' % self.path)
816 except:
817 self.fp = None
818 self.stderr = 'Cannot open file %r for download' % self.path
819 self.rc = 1
820 if self.debug:
821 log.msg('Cannot open file %r for download' % self.path)
824 def start(self):
825 self.cmd = defer.Deferred()
826 reactor.callLater(0, self._readBlock)
828 return self.cmd
830 def _readBlock(self):
832 Read a block of data from the remote reader
834 if self.interrupted or self.fp is None:
835 if self.debug:
836 log.msg('SlaveFileDownloadCommand._readBlock(): end')
837 d = self.reader.callRemote('close')
838 d.addCallback(lambda _: self.finished())
839 return
841 length = self.blocksize
842 if self.maxsize is not None and length > self.maxsize:
843 length = self.maxsize
845 if length <= 0:
846 if self.stderr is None:
847 self.stderr = 'Maximum filesize reached, truncating file %r' \
848 % self.path
849 self.rc = 1
850 d = self.reader.callRemote('close')
851 d.addCallback(lambda _: self.finished())
852 else:
853 d = self.reader.callRemote('read', length)
854 d.addCallback(self._writeData)
856 def _writeData(self,data):
857 if self.debug:
858 log.msg('SlaveFileDownloadCommand._writeData(): '+
859 'readlen=%d' % len(data))
860 if len(data) == 0:
861 d = self.reader.callRemote('close')
862 d.addCallback(lambda _: self.finished())
863 else:
864 if self.maxsize is not None:
865 self.maxsize = self.maxsize - len(data)
866 assert self.maxsize >= 0
867 self.fp.write(data)
868 self._readBlock() # set up the callback for the next block (or finish)
871 def interrupt(self):
872 if self.debug:
873 log.msg('interrupted')
874 if self.interrupted:
875 return
876 if self.stderr is None:
877 self.stderr = 'Download of %r interrupted' % self.path
878 self.rc = 1
879 self.interrupted = True
880 self.finished()
883 def finished(self):
884 if self.fp is not None:
885 self.fp.close()
887 if self.debug:
888 log.msg('finished: stderr=%r, rc=%r' % (self.stderr,self.rc))
889 if self.stderr is None:
890 self.sendStatus({'rc':self.rc})
891 else:
892 self.sendStatus({'stderr':self.stderr, 'rc':self.rc})
893 self.cmd.callback(0)
896 registerSlaveCommand("downloadFile", SlaveFileDownloadCommand, command_version)
900 class SlaveShellCommand(Command):
901 """This is a Command which runs a shell command. The args dict contains
902 the following keys:
904 - ['command'] (required): a shell command to run. If this is a string,
905 it will be run with /bin/sh (['/bin/sh',
906 '-c', command]). If it is a list
907 (preferred), it will be used directly.
908 - ['workdir'] (required): subdirectory in which the command will be
909 run, relative to the builder dir
910 - ['env']: a dict of environment variables to augment/replace
911 os.environ . PYTHONPATH is treated specially, and
912 should be a list of path components to be prepended to
913 any existing PYTHONPATH environment variable.
914 - ['initial_stdin']: a string which will be written to the command's
915 stdin as soon as it starts
916 - ['keep_stdin_open']: unless True, the command's stdin will be
917 closed as soon as initial_stdin has been
918 written. Set this to True if you plan to write
919 to stdin after the command has been started.
920 - ['want_stdout']: 0 if stdout should be thrown away
921 - ['want_stderr']: 0 if stderr should be thrown away
922 - ['not_really']: 1 to skip execution and return rc=0
923 - ['timeout']: seconds of silence to tolerate before killing command
924 - ['logfiles']: dict mapping LogFile name to the workdir-relative
925 filename of a local log file. This local file will be
926 watched just like 'tail -f', and all changes will be
927 written to 'log' status updates.
929 ShellCommand creates the following status messages:
930 - {'stdout': data} : when stdout data is available
931 - {'stderr': data} : when stderr data is available
932 - {'header': data} : when headers (command start/stop) are available
933 - {'log': (logfile_name, data)} : when log files have new contents
934 - {'rc': rc} : when the process has terminated
937 def start(self):
938 args = self.args
939 # args['workdir'] is relative to Builder directory, and is required.
940 assert args['workdir'] is not None
941 workdir = os.path.join(self.builder.basedir, args['workdir'])
943 c = ShellCommand(self.builder, args['command'],
944 workdir, environ=args.get('env'),
945 timeout=args.get('timeout', None),
946 sendStdout=args.get('want_stdout', True),
947 sendStderr=args.get('want_stderr', True),
948 sendRC=True,
949 initialStdin=args.get('initial_stdin'),
950 keepStdinOpen=args.get('keep_stdin_open'),
951 logfiles=args.get('logfiles', {}),
952 )
953 self.command = c
954 d = self.command.start()
955 return d
957 def interrupt(self):
958 self.interrupted = True
959 self.command.kill("command interrupted")
961 def writeStdin(self, data):
962 self.command.writeStdin(data)
964 def closeStdin(self):
965 self.command.closeStdin()
967 registerSlaveCommand("shell", SlaveShellCommand, command_version)
970 class DummyCommand(Command):
972 I am a dummy no-op command that by default takes 5 seconds to complete.
973 See L{buildbot.process.step.RemoteDummy}
976 def start(self):
977 self.d = defer.Deferred()
978 log.msg(" starting dummy command [%s]" % self.stepId)
979 self.timer = reactor.callLater(1, self.doStatus)
980 return self.d
982 def interrupt(self):
983 if self.interrupted:
984 return
985 self.timer.cancel()
986 self.timer = None
987 self.interrupted = True
988 self.finished()
990 def doStatus(self):
991 log.msg(" sending intermediate status")
992 self.sendStatus({'stdout': 'data'})
993 timeout = self.args.get('timeout', 5) + 1
994 self.timer = reactor.callLater(timeout - 1, self.finished)
996 def finished(self):
997 log.msg(" dummy command finished [%s]" % self.stepId)
998 if self.interrupted:
999 self.sendStatus({'rc': 1})
1000 else:
1001 self.sendStatus({'rc': 0})
1002 self.d.callback(0)
1004 registerSlaveCommand("dummy", DummyCommand, command_version)
1007 class SourceBase(Command):
1008 """Abstract base class for Version Control System operations (checkout
1009 and update). This class extracts the following arguments from the
1010 dictionary received from the master:
1012 - ['workdir']: (required) the subdirectory where the buildable sources
1013 should be placed
1015 - ['mode']: one of update/copy/clobber/export, defaults to 'update'
1017 - ['revision']: If not None, this is an int or string which indicates
1018 which sources (along a time-like axis) should be used.
1019 It is the thing you provide as the CVS -r or -D
1020 argument.
1022 - ['patch']: If not None, this is a tuple of (striplevel, patch)
1023 which contains a patch that should be applied after the
1024 checkout has occurred. Once applied, the tree is no
1025 longer eligible for use with mode='update', and it only
1026 makes sense to use this in conjunction with a
1027 ['revision'] argument. striplevel is an int, and patch
1028 is a string in standard unified diff format. The patch
1029 will be applied with 'patch -p%d <PATCH', with
1030 STRIPLEVEL substituted as %d. The command will fail if
1031 the patch process fails (rejected hunks).
1033 - ['timeout']: seconds of silence tolerated before we kill off the
1034 command
1036 - ['retry']: If not None, this is a tuple of (delay, repeats)
1037 which means that any failed VC updates should be
1038 reattempted, up to REPEATS times, after a delay of
1039 DELAY seconds. This is intended to deal with slaves
1040 that experience transient network failures.
1043 sourcedata = ""
1045 def setup(self, args):
1046 # if we need to parse the output, use this environment. Otherwise
1047 # command output will be in whatever the buildslave's native language
1048 # has been set to.
1049 self.env = os.environ.copy()
1050 self.env['LC_ALL'] = "C"
1052 self.workdir = args['workdir']
1053 self.mode = args.get('mode', "update")
1054 self.revision = args.get('revision')
1055 self.patch = args.get('patch')
1056 self.timeout = args.get('timeout', 120)
1057 self.retry = args.get('retry')
1058 # VC-specific subclasses should override this to extract more args.
1059 # Make sure to upcall!
1061 def start(self):
1062 self.sendStatus({'header': "starting " + self.header + "\n"})
1063 self.command = None
1065 # self.srcdir is where the VC system should put the sources
1066 if self.mode == "copy":
1067 self.srcdir = "source" # hardwired directory name, sorry
1068 else:
1069 self.srcdir = self.workdir
1070 self.sourcedatafile = os.path.join(self.builder.basedir,
1071 self.srcdir,
1072 ".buildbot-sourcedata")
1074 d = defer.succeed(None)
1075 # do we need to clobber anything?
1076 if self.mode in ("copy", "clobber", "export"):
1077 d.addCallback(self.doClobber, self.workdir)
1078 if not (self.sourcedirIsUpdateable() and self.sourcedataMatches()):
1079 # the directory cannot be updated, so we have to clobber it.
1080 # Perhaps the master just changed modes from 'export' to
1081 # 'update'.
1082 d.addCallback(self.doClobber, self.srcdir)
1084 d.addCallback(self.doVC)
1086 if self.mode == "copy":
1087 d.addCallback(self.doCopy)
1088 if self.patch:
1089 d.addCallback(self.doPatch)
1090 d.addCallbacks(self._sendRC, self._checkAbandoned)
1091 return d
1093 def interrupt(self):
1094 self.interrupted = True
1095 if self.command:
1096 self.command.kill("command interrupted")
1098 def doVC(self, res):
1099 if self.interrupted:
1100 raise AbandonChain(1)
1101 if self.sourcedirIsUpdateable() and self.sourcedataMatches():
1102 d = self.doVCUpdate()
1103 d.addCallback(self.maybeDoVCFallback)
1104 else:
1105 d = self.doVCFull()
1106 d.addBoth(self.maybeDoVCRetry)
1107 d.addCallback(self._abandonOnFailure)
1108 d.addCallback(self._handleGotRevision)
1109 d.addCallback(self.writeSourcedata)
1110 return d
1112 def sourcedataMatches(self):
1113 try:
1114 olddata = open(self.sourcedatafile, "r").read()
1115 if olddata != self.sourcedata:
1116 return False
1117 except IOError:
1118 return False
1119 return True
1121 def _handleGotRevision(self, res):
1122 d = defer.maybeDeferred(self.parseGotRevision)
1123 d.addCallback(lambda got_revision:
1124 self.sendStatus({'got_revision': got_revision}))
1125 return d
1127 def parseGotRevision(self):
1128 """Override this in a subclass. It should return a string that
1129 represents which revision was actually checked out, or a Deferred
1130 that will fire with such a string. If, in a future build, you were to
1131 pass this 'got_revision' string in as the 'revision' component of a
1132 SourceStamp, you should wind up with the same source code as this
1133 checkout just obtained.
1135 It is probably most useful to scan self.command.stdout for a string
1136 of some sort. Be sure to set keepStdout=True on the VC command that
1137 you run, so that you'll have something available to look at.
1139 If this information is unavailable, just return None."""
1141 return None
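    # Illustrative sketch (not part of the original file): a typical override
    # of parseGotRevision, in the style used by the SVN class further below.
    # 'exampleversion' is a hypothetical command that prints the checked-out
    # revision; note keepStdout=True so there is output to scan.
    def _example_parseGotRevision(self):
        command = [getCommand("exampleversion")]
        c = ShellCommand(self.builder, command,
                         os.path.join(self.builder.basedir, self.srcdir),
                         environ=self.env,
                         sendStdout=False, sendStderr=False, sendRC=False,
                         keepStdout=True)
        c.usePTY = False
        d = c.start()
        d.addCallback(lambda res: c.stdout.strip())
        return d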
1143 def writeSourcedata(self, res):
1144 open(self.sourcedatafile, "w").write(self.sourcedata)
1145 return res
1147 def sourcedirIsUpdateable(self):
1148 raise NotImplementedError("this must be implemented in a subclass")
1150 def doVCUpdate(self):
1151 raise NotImplementedError("this must be implemented in a subclass")
1153 def doVCFull(self):
1154 raise NotImplementedError("this must be implemented in a subclass")
1156 def maybeDoVCFallback(self, rc):
1157 if type(rc) is int and rc == 0:
1158 return rc
1159 if self.interrupted:
1160 raise AbandonChain(1)
1161 msg = "update failed, clobbering and trying again"
1162 self.sendStatus({'header': msg + "\n"})
1163 log.msg(msg)
1164 d = self.doClobber(None, self.srcdir)
1165 d.addCallback(self.doVCFallback2)
1166 return d
1168 def doVCFallback2(self, res):
1169 msg = "now retrying VC operation"
1170 self.sendStatus({'header': msg + "\n"})
1171 log.msg(msg)
1172 d = self.doVCFull()
1173 d.addBoth(self.maybeDoVCRetry)
1174 d.addCallback(self._abandonOnFailure)
1175 return d
1177 def maybeDoVCRetry(self, res):
1178 """We get here somewhere after a VC chain has finished. res could
1179 be::
1181 - 0: the operation was successful
1182 - nonzero: the operation failed. retry if possible
1183 - AbandonChain: the operation failed, someone else noticed. retry.
1184 - Failure: some other exception, re-raise
1187 if isinstance(res, failure.Failure):
1188 if self.interrupted:
1189 return res # don't re-try interrupted builds
1190 res.trap(AbandonChain)
1191 else:
1192 if type(res) is int and res == 0:
1193 return res
1194 if self.interrupted:
1195 raise AbandonChain(1)
1196 # if we get here, we should retry, if possible
1197 if self.retry:
1198 delay, repeats = self.retry
1199 if repeats >= 0:
1200 self.retry = (delay, repeats-1)
1201 msg = ("update failed, trying %d more times after %d seconds"
1202 % (repeats, delay))
1203 self.sendStatus({'header': msg + "\n"})
1204 log.msg(msg)
1205 d = defer.Deferred()
1206 d.addCallback(lambda res: self.doVCFull())
1207 d.addBoth(self.maybeDoVCRetry)
1208 reactor.callLater(delay, d.callback, None)
1209 return d
1210 return res
1212 def doClobber(self, dummy, dirname):
1213 # TODO: remove the old tree in the background
1214 ## workdir = os.path.join(self.builder.basedir, self.workdir)
1215 ## deaddir = self.workdir + ".deleting"
1216 ## if os.path.isdir(workdir):
1217 ## try:
1218 ## os.rename(workdir, deaddir)
1219 ## # might fail if deaddir already exists: previous deletion
1220 ## # hasn't finished yet
1221 ## # start the deletion in the background
1222 ## # TODO: there was a solaris/NetApp/NFS problem where a
1223 ## # process that was still running out of the directory we're
1224 ## # trying to delete could prevent the rm-rf from working. I
1225 ## # think it stalled the rm, but maybe it just died with
1226 ## # permission issues. Try to detect this.
1227 ## os.commands("rm -rf %s &" % deaddir)
1228 ## except:
1229 ## # fall back to sequential delete-then-checkout
1230 ## pass
1231 d = os.path.join(self.builder.basedir, dirname)
1232 if runtime.platformType != "posix":
1233 # if we're running on w32, use rmtree instead. It will block,
1234 # but hopefully it won't take too long.
1235 rmdirRecursive(d)
1236 return defer.succeed(0)
1237 command = ["rm", "-rf", d]
1238 c = ShellCommand(self.builder, command, self.builder.basedir,
1239 sendRC=0, timeout=self.timeout)
1240 self.command = c
1241 # sendRC=0 means the rm command will send stdout/stderr to the
1242 # master, but not the rc=0 when it finishes. That job is left to
1243 # _sendRC
1244 d = c.start()
1245 d.addCallback(self._abandonOnFailure)
1246 return d
1248 def doCopy(self, res):
1249 # now copy tree to workdir
1250 fromdir = os.path.join(self.builder.basedir, self.srcdir)
1251 todir = os.path.join(self.builder.basedir, self.workdir)
1252 if runtime.platformType != "posix":
1253 shutil.copytree(fromdir, todir)
1254 return defer.succeed(0)
1255 command = ['cp', '-r', '-p', fromdir, todir]
1256 c = ShellCommand(self.builder, command, self.builder.basedir,
1257 sendRC=False, timeout=self.timeout)
1258 self.command = c
1259 d = c.start()
1260 d.addCallback(self._abandonOnFailure)
1261 return d
1263 def doPatch(self, res):
1264 patchlevel, diff = self.patch
1265 command = [getCommand("patch"), '-p%d' % patchlevel]
1266 dir = os.path.join(self.builder.basedir, self.workdir)
1267 # mark the directory so we don't try to update it later
1268 open(os.path.join(dir, ".buildbot-patched"), "w").write("patched\n")
1269 # now apply the patch
1270 c = ShellCommand(self.builder, command, dir,
1271 sendRC=False, timeout=self.timeout,
1272 initialStdin=diff)
1273 self.command = c
1274 d = c.start()
1275 d.addCallback(self._abandonOnFailure)
1276 return d
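# Illustrative sketch (not part of the original file): the minimum a new VC
# class must provide on top of SourceBase. 'examplevc' and its 'update' and
# 'clone' subcommands are hypothetical placeholders, not a real tool; compare
# the CVS/SVN/Darcs classes below for real implementations.
class _ExampleVC(SourceBase):
    header = "examplevc operation"

    def setup(self, args):
        SourceBase.setup(self, args)
        self.vcexe = getCommand("examplevc")  # hypothetical executable
        self.repourl = args['repourl']
        self.sourcedata = "%s\n" % self.repourl

    def sourcedirIsUpdateable(self):
        # an existing metadata directory means an in-place update is possible
        return os.path.isdir(os.path.join(self.builder.basedir,
                                          self.srcdir, ".examplevc"))

    def doVCUpdate(self):
        d = os.path.join(self.builder.basedir, self.srcdir)
        c = ShellCommand(self.builder, [self.vcexe, 'update'], d,
                         sendRC=False, timeout=self.timeout)
        self.command = c
        return c.start()

    def doVCFull(self):
        # fresh checkout into self.srcdir, run from the builder basedir
        command = [self.vcexe, 'clone', self.repourl, self.srcdir]
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         sendRC=False, timeout=self.timeout)
        self.command = c
        return c.start()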
1279 class CVS(SourceBase):
1280 """CVS-specific VC operation. In addition to the arguments handled by
1281 SourceBase, this command reads the following keys:
1283 ['cvsroot'] (required): the CVSROOT repository string
1284 ['cvsmodule'] (required): the module to be retrieved
1285 ['branch']: a '-r' tag or branch name to use for the checkout/update
1286 ['login']: a string for use as a password to 'cvs login'
1287 ['global_options']: a list of strings to use before the CVS verb
1290 header = "cvs operation"
1292 def setup(self, args):
1293 SourceBase.setup(self, args)
1294 self.vcexe = getCommand("cvs")
1295 self.cvsroot = args['cvsroot']
1296 self.cvsmodule = args['cvsmodule']
1297 self.global_options = args.get('global_options', [])
1298 self.branch = args.get('branch')
1299 self.login = args.get('login')
1300 self.sourcedata = "%s\n%s\n%s\n" % (self.cvsroot, self.cvsmodule,
1301 self.branch)
1303 def sourcedirIsUpdateable(self):
1304 if os.path.exists(os.path.join(self.builder.basedir,
1305 self.srcdir, ".buildbot-patched")):
1306 return False
1307 return os.path.isdir(os.path.join(self.builder.basedir,
1308 self.srcdir, "CVS"))
1310 def start(self):
1311 if self.login is not None:
1312 # need to do a 'cvs login' command first
1313 d = self.builder.basedir
1314 command = ([self.vcexe, '-d', self.cvsroot] + self.global_options
1315 + ['login'])
1316 c = ShellCommand(self.builder, command, d,
1317 sendRC=False, timeout=self.timeout,
1318 initialStdin=self.login+"\n")
1319 self.command = c
1320 d = c.start()
1321 d.addCallback(self._abandonOnFailure)
1322 d.addCallback(self._didLogin)
1323 return d
1324 else:
1325 return self._didLogin(None)
1327 def _didLogin(self, res):
1328 # now we really start
1329 return SourceBase.start(self)
1331 def doVCUpdate(self):
1332 d = os.path.join(self.builder.basedir, self.srcdir)
1333 command = [self.vcexe, '-z3'] + self.global_options + ['update', '-dP']
1334 if self.branch:
1335 command += ['-r', self.branch]
1336 if self.revision:
1337 command += ['-D', self.revision]
1338 c = ShellCommand(self.builder, command, d,
1339 sendRC=False, timeout=self.timeout)
1340 self.command = c
1341 return c.start()
1343 def doVCFull(self):
1344 d = self.builder.basedir
1345 if self.mode == "export":
1346 verb = "export"
1347 else:
1348 verb = "checkout"
1349 command = ([self.vcexe, '-d', self.cvsroot, '-z3'] +
1350 self.global_options +
1351 [verb, '-d', self.srcdir])
1352 if self.branch:
1353 command += ['-r', self.branch]
1354 if self.revision:
1355 command += ['-D', self.revision]
1356 command += [self.cvsmodule]
1357 c = ShellCommand(self.builder, command, d,
1358 sendRC=False, timeout=self.timeout)
1359 self.command = c
1360 return c.start()
1362 def parseGotRevision(self):
1363 # CVS does not have any kind of revision stamp to speak of. We return
1364 # the current timestamp as a best-effort guess, but this depends upon
1365 # the local system having a clock that is
1366 # reasonably-well-synchronized with the repository.
1367 return time.strftime("%Y-%m-%d %H:%M:%S +0000", time.gmtime())
1369 registerSlaveCommand("cvs", CVS, command_version)
1371 class SVN(SourceBase):
1372 """Subversion-specific VC operation. In addition to the arguments
1373 handled by SourceBase, this command reads the following keys:
1375 ['svnurl'] (required): the SVN repository string
1378 header = "svn operation"
1380 def setup(self, args):
1381 SourceBase.setup(self, args)
1382 self.vcexe = getCommand("svn")
1383 self.svnurl = args['svnurl']
1384 self.sourcedata = "%s\n" % self.svnurl
1386 def sourcedirIsUpdateable(self):
1387 if os.path.exists(os.path.join(self.builder.basedir,
1388 self.srcdir, ".buildbot-patched")):
1389 return False
1390 return os.path.isdir(os.path.join(self.builder.basedir,
1391 self.srcdir, ".svn"))
1393 def doVCUpdate(self):
1394 revision = self.args['revision'] or 'HEAD'
1395 # update: possible for mode in ('copy', 'update')
1396 d = os.path.join(self.builder.basedir, self.srcdir)
1397 command = [self.vcexe, 'update', '--revision', str(revision),
1398 '--non-interactive']
1399 c = ShellCommand(self.builder, command, d,
1400 sendRC=False, timeout=self.timeout,
1401 keepStdout=True)
1402 self.command = c
1403 return c.start()
1405 def doVCFull(self):
1406 revision = self.args['revision'] or 'HEAD'
1407 d = self.builder.basedir
1408 if self.mode == "export":
1409 command = [self.vcexe, 'export', '--revision', str(revision),
1410 '--non-interactive',
1411 self.svnurl, self.srcdir]
1412 else:
1413 # mode=='clobber', or copy/update on a broken workspace
1414 command = [self.vcexe, 'checkout', '--revision', str(revision),
1415 '--non-interactive',
1416 self.svnurl, self.srcdir]
1417 c = ShellCommand(self.builder, command, d,
1418 sendRC=False, timeout=self.timeout,
1419 keepStdout=True)
1420 self.command = c
1421 return c.start()
1423 def parseGotRevision(self):
1424 # svn checkout operations finish with 'Checked out revision 16657.'
1425 # svn update operations finish the line 'At revision 16654.'
1426 # But we don't use those. Instead, run 'svnversion'.
1427 svnversion_command = getCommand("svnversion")
1428 # older versions of 'svnversion' (1.1.4) require the WC_PATH
1429 # argument, newer ones (1.3.1) do not.
1430 command = [svnversion_command, "."]
1431 c = ShellCommand(self.builder, command,
1432 os.path.join(self.builder.basedir, self.srcdir),
1433 environ=self.env,
1434 sendStdout=False, sendStderr=False, sendRC=False,
1435 keepStdout=True)
1436 c.usePTY = False
1437 d = c.start()
1438 def _parse(res):
1439 r = c.stdout.strip()
1440 got_version = None
1441 try:
1442 got_version = int(r)
1443 except ValueError:
1444 msg =("SVN.parseGotRevision unable to parse output "
1445 "of svnversion: '%s'" % r)
1446 log.msg(msg)
1447 self.sendStatus({'header': msg + "\n"})
1448 return got_version
1449 d.addCallback(_parse)
1450 return d
1453 registerSlaveCommand("svn", SVN, command_version)
1455 class Darcs(SourceBase):
1456 """Darcs-specific VC operation. In addition to the arguments
1457 handled by SourceBase, this command reads the following keys:
1459 ['repourl'] (required): the Darcs repository string
1462 header = "darcs operation"
1464 def setup(self, args):
1465 SourceBase.setup(self, args)
1466 self.vcexe = getCommand("darcs")
1467 self.repourl = args['repourl']
1468 self.sourcedata = "%s\n" % self.repourl
1469 self.revision = self.args.get('revision')
1471 def sourcedirIsUpdateable(self):
1472 if os.path.exists(os.path.join(self.builder.basedir,
1473 self.srcdir, ".buildbot-patched")):
1474 return False
1475 if self.revision:
1476 # checking out a specific revision requires a full 'darcs get'
1477 return False
1478 return os.path.isdir(os.path.join(self.builder.basedir,
1479 self.srcdir, "_darcs"))
1481 def doVCUpdate(self):
1482 assert not self.revision
1483 # update: possible for mode in ('copy', 'update')
1484 d = os.path.join(self.builder.basedir, self.srcdir)
1485 command = [self.vcexe, 'pull', '--all', '--verbose']
1486 c = ShellCommand(self.builder, command, d,
1487 sendRC=False, timeout=self.timeout)
1488 self.command = c
1489 return c.start()
1491 def doVCFull(self):
1492 # checkout or export
1493 d = self.builder.basedir
1494 command = [self.vcexe, 'get', '--verbose', '--partial',
1495 '--repo-name', self.srcdir]
1496 if self.revision:
1497 # write the context to a file
1498 n = os.path.join(self.builder.basedir, ".darcs-context")
1499 f = open(n, "wb")
1500 f.write(self.revision)
1501 f.close()
1502 # tell Darcs to use that context
1503 command.append('--context')
1504 command.append(n)
1505 command.append(self.repourl)
1507 c = ShellCommand(self.builder, command, d,
1508 sendRC=False, timeout=self.timeout)
1509 self.command = c
1510 d = c.start()
1511 if self.revision:
1512 d.addCallback(self.removeContextFile, n)
1513 return d
1515 def removeContextFile(self, res, n):
1516 os.unlink(n)
1517 return res
1519 def parseGotRevision(self):
1520 # we use 'darcs changes --context' to find out what we wound up with
1521 command = [self.vcexe, "changes", "--context"]
1522 c = ShellCommand(self.builder, command,
1523 os.path.join(self.builder.basedir, self.srcdir),
1524 environ=self.env,
1525 sendStdout=False, sendStderr=False, sendRC=False,
1526 keepStdout=True)
1527 c.usePTY = False
1528 d = c.start()
1529 d.addCallback(lambda res: c.stdout)
1530 return d
1532 registerSlaveCommand("darcs", Darcs, command_version)
1534 class Monotone(SourceBase):
1535 """Monotone-specific VC operation. In addition to the arguments handled
1536 by SourceBase, this command reads the following keys:
1538 ['server_addr'] (required): the address of the server to pull from
1539 ['branch'] (required): the branch the revision is on
1540 ['db_path'] (required): the local database path to use
1541 ['revision'] (required): the revision to check out
1542 ['monotone'] (required): path to monotone executable
1545 header = "monotone operation"
1547 def setup(self, args):
1548 SourceBase.setup(self, args)
1549 self.server_addr = args["server_addr"]
1550 self.branch = args["branch"]
1551 self.db_path = args["db_path"]
1552 self.revision = args["revision"]
1553 self.monotone = args["monotone"]
1554 self._made_fulls = False
1555 self._pull_timeout = args["timeout"]
1557 def _makefulls(self):
1558 if not self._made_fulls:
1559 basedir = self.builder.basedir
1560 self.full_db_path = os.path.join(basedir, self.db_path)
1561 self.full_srcdir = os.path.join(basedir, self.srcdir)
1562 self._made_fulls = True
1564 def sourcedirIsUpdateable(self):
1565 self._makefulls()
1566 if os.path.exists(os.path.join(self.full_srcdir,
1567 ".buildbot_patched")):
1568 return False
1569 return (os.path.isfile(self.full_db_path)
1570 and os.path.isdir(os.path.join(self.full_srcdir, "MT")))
1572 def doVCUpdate(self):
1573 return self._withFreshDb(self._doUpdate)
1575 def _doUpdate(self):
1576 # update: possible for mode in ('copy', 'update')
1577 command = [self.monotone, "update",
1578 "-r", self.revision,
1579 "-b", self.branch]
1580 c = ShellCommand(self.builder, command, self.full_srcdir,
1581 sendRC=False, timeout=self.timeout)
1582 self.command = c
1583 return c.start()
1585 def doVCFull(self):
1586 return self._withFreshDb(self._doFull)
1588 def _doFull(self):
1589 command = [self.monotone, "--db=" + self.full_db_path,
1590 "checkout",
1591 "-r", self.revision,
1592 "-b", self.branch,
1593 self.full_srcdir]
1594 c = ShellCommand(self.builder, command, self.builder.basedir,
1595 sendRC=False, timeout=self.timeout)
1596 self.command = c
1597 return c.start()
1599 def _withFreshDb(self, callback):
1600 self._makefulls()
1601 # first ensure the db exists and is usable
1602 if os.path.isfile(self.full_db_path):
1603 # already exists, so run 'db migrate' in case monotone has been
1604 # upgraded under us
1605 command = [self.monotone, "db", "migrate",
1606 "--db=" + self.full_db_path]
1607 else:
1608 # We'll be doing an initial pull, so up the timeout to 3 hours to
1609 # make sure it will have time to complete.
1610 self._pull_timeout = max(self._pull_timeout, 3 * 60 * 60)
1611 self.sendStatus({"header": "creating database %s\n"
1612 % (self.full_db_path,)})
1613 command = [self.monotone, "db", "init",
1614 "--db=" + self.full_db_path]
1615 c = ShellCommand(self.builder, command, self.builder.basedir,
1616 sendRC=False, timeout=self.timeout)
1617 self.command = c
1618 d = c.start()
1619 d.addCallback(self._abandonOnFailure)
1620 d.addCallback(self._didDbInit)
1621 d.addCallback(self._didPull, callback)
1622 return d
1624 def _didDbInit(self, res):
1625 command = [self.monotone, "--db=" + self.full_db_path,
1626 "pull", "--ticker=dot", self.server_addr, self.branch]
1627 c = ShellCommand(self.builder, command, self.builder.basedir,
1628 sendRC=False, timeout=self._pull_timeout)
1629 self.sendStatus({"header": "pulling %s from %s\n"
1630 % (self.branch, self.server_addr)})
1631 self.command = c
1632 return c.start()
1634 def _didPull(self, res, callback):
1635 return callback()
1637 registerSlaveCommand("monotone", Monotone, command_version)
1640 class Git(SourceBase):
1641 """Git specific VC operation. In addition to the arguments
1642 handled by SourceBase, this command reads the following keys:
1644 ['repourl'] (required): the Cogito repository string
1647 header = "git operation"
1649 def setup(self, args):
1650 SourceBase.setup(self, args)
1651 self.repourl = args['repourl']
1652 #self.sourcedata = "" # TODO
1654 def sourcedirIsUpdateable(self):
1655 if os.path.exists(os.path.join(self.builder.basedir,
1656 self.srcdir, ".buildbot-patched")):
1657 return False
1658 return os.path.isdir(os.path.join(self.builder.basedir,
1659 self.srcdir, ".git"))
1661 def doVCUpdate(self):
1662 d = os.path.join(self.builder.basedir, self.srcdir)
1663 command = ['cg-update']
1664 c = ShellCommand(self.builder, command, d,
1665 sendRC=False, timeout=self.timeout)
1666 self.command = c
1667 return c.start()
1669 def doVCFull(self):
1670 d = os.path.join(self.builder.basedir, self.srcdir)
1671 os.mkdir(d)
1672 command = ['cg-clone', '-s', self.repourl]
1673 c = ShellCommand(self.builder, command, d,
1674 sendRC=False, timeout=self.timeout)
1675 self.command = c
1676 return c.start()
1678 registerSlaveCommand("git", Git, command_version)
1680 class Arch(SourceBase):
1681 """Arch-specific (tla-specific) VC operation. In addition to the
1682 arguments handled by SourceBase, this command reads the following keys:
1684 ['url'] (required): the repository string
1685 ['version'] (required): which version (i.e. branch) to retrieve
1686 ['revision'] (optional): the 'patch-NN' argument to check out
1687 ['archive']: the archive name to use. If None, use the archive's default
1688 ['build-config']: if present, give to 'tla build-config' after checkout
1691 header = "arch operation"
1692 buildconfig = None
1694 def setup(self, args):
1695 SourceBase.setup(self, args)
1696 self.vcexe = getCommand("tla")
1697 self.archive = args.get('archive')
1698 self.url = args['url']
1699 self.version = args['version']
1700 self.revision = args.get('revision')
1701 self.buildconfig = args.get('build-config')
1702 self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
1703 self.buildconfig)
1705 def sourcedirIsUpdateable(self):
1706 if self.revision:
1707 # Arch cannot roll a directory backwards, so if they ask for a
1708 # specific revision, clobber the directory. Technically this
1709 # could be limited to the cases where the requested revision is
1710 # later than our current one, but it's too hard to extract the
1711 # current revision from the tree.
1712 return False
1713 if os.path.exists(os.path.join(self.builder.basedir,
1714 self.srcdir, ".buildbot-patched")):
1715 return False
1716 return os.path.isdir(os.path.join(self.builder.basedir,
1717 self.srcdir, "{arch}"))
1719 def doVCUpdate(self):
1720 # update: possible for mode in ('copy', 'update')
1721 d = os.path.join(self.builder.basedir, self.srcdir)
1722 command = [self.vcexe, 'replay']
1723 if self.revision:
1724 command.append(self.revision)
1725 c = ShellCommand(self.builder, command, d,
1726 sendRC=False, timeout=self.timeout)
1727 self.command = c
1728 return c.start()
1730 def doVCFull(self):
1731 # to do a checkout, we must first "register" the archive by giving
1732 # the URL to tla, which will go to the repository at that URL and
1733 # figure out the archive name. tla will tell you the archive name
1734 # when it is done, and all further actions must refer to this name.
1736 command = [self.vcexe, 'register-archive', '--force', self.url]
1737 c = ShellCommand(self.builder, command, self.builder.basedir,
1738 sendRC=False, keepStdout=True,
1739 timeout=self.timeout)
1740 self.command = c
1741 d = c.start()
1742 d.addCallback(self._abandonOnFailure)
1743 d.addCallback(self._didRegister, c)
1744 return d
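# Note on the chain above: c.start() -> _abandonOnFailure -> _didRegister
# -> _doGet (which may append _didGet for 'tla build-config').
# _abandonOnFailure is defined in SourceBase (not shown here) and is assumed
# to stop the chain when the preceding command returned a non-zero rc.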
1746 def _didRegister(self, res, c):
1747 # find out what tla thinks the archive name is. If the user told us
1748 # to use something specific, make sure it matches.
1749 r = re.search(r'Registering archive: (\S+)\s*$', c.stdout)
1750 if r:
1751 msg = "tla reports archive name is '%s'" % r.group(1)
1752 log.msg(msg)
1753 self.builder.sendUpdate({'header': msg+"\n"})
1754 if self.archive and r.group(1) != self.archive:
1755 msg = (" mismatch, we wanted an archive named '%s'"
1756 % self.archive)
1757 log.msg(msg)
1758 self.builder.sendUpdate({'header': msg+"\n"})
1759 raise AbandonChain(-1)
1760 self.archive = r.group(1)
1761 assert self.archive, "need archive name to continue"
1762 return self._doGet()
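# For illustration: given stdout ending in a (hypothetical) line such as
#     "Registering archive: lord@example.net--2005"
# the regexp above yields r.group(1) == "lord@example.net--2005", which
# becomes self.archive. If the output format differs and the regexp does not
# match, we fall back on whatever archive name the master supplied (the
# assert above insists that we have one).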
1764 def _doGet(self):
1765 ver = self.version
1766 if self.revision:
1767 ver += "--%s" % self.revision
1768 command = [self.vcexe, 'get', '--archive', self.archive,
1769 '--no-pristine',
1770 ver, self.srcdir]
1771 c = ShellCommand(self.builder, command, self.builder.basedir,
1772 sendRC=False, timeout=self.timeout)
1773 self.command = c
1774 d = c.start()
1775 d.addCallback(self._abandonOnFailure)
1776 if self.buildconfig:
1777 d.addCallback(self._didGet)
1778 return d
1780 def _didGet(self, res):
1781 d = os.path.join(self.builder.basedir, self.srcdir)
1782 command = [self.vcexe, 'build-config', self.buildconfig]
1783 c = ShellCommand(self.builder, command, d,
1784 sendRC=False, timeout=self.timeout)
1785 self.command = c
1786 d = c.start()
1787 d.addCallback(self._abandonOnFailure)
1788 return d
1790 def parseGotRevision(self):
1791 # using code from tryclient.TlaExtractor
1792 # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
1793 # 'tla logs' gives us REVISION
1794 command = [self.vcexe, "logs", "--full", "--reverse"]
1795 c = ShellCommand(self.builder, command,
1796 os.path.join(self.builder.basedir, self.srcdir),
1797 environ=self.env,
1798 sendStdout=False, sendStderr=False, sendRC=False,
1799 keepStdout=True)
1800 c.usePTY = False
1801 d = c.start()
1802 def _parse(res):
1803 tid = c.stdout.split("\n")[0].strip()
1804 slash = tid.index("/")
1805 dd = tid.rindex("--")
1806 #branch = tid[slash+1:dd]
1807 baserev = tid[dd+2:]
1808 return baserev
1809 d.addCallback(_parse)
1810 return d
1812 registerSlaveCommand("arch", Arch, command_version)
1814 class Bazaar(Arch):
1815 """Bazaar (/usr/bin/baz) is an alternative client for Arch repositories.
1816 It is mostly option-compatible, but archive registration is different
1817 enough to warrant a separate Command.
1819 ['archive'] (required): the name of the archive being used
1820 """
1822 def setup(self, args):
1823 Arch.setup(self, args)
1824 self.vcexe = getCommand("baz")
1825 # baz doesn't emit the repository name after registration (and
1826 # grepping through the output of 'baz archives' is too hard), so we
1827 # require the buildmaster configuration to provide both the
1828 # archive name and the URL.
1829 self.archive = args['archive'] # required for Baz
1830 self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
1831 self.buildconfig)
1833 # in _didRegister, the regexp won't match, so we'll stick with the name
1834 # in self.archive
1836 def _doGet(self):
1837 # baz prefers ARCHIVE/VERSION. This will work even if
1838 # my-default-archive is not set.
1839 ver = self.archive + "/" + self.version
1840 if self.revision:
1841 ver += "--%s" % self.revision
1842 command = [self.vcexe, 'get', '--no-pristine',
1843 ver, self.srcdir]
1844 c = ShellCommand(self.builder, command, self.builder.basedir,
1845 sendRC=False, timeout=self.timeout)
1846 self.command = c
1847 d = c.start()
1848 d.addCallback(self._abandonOnFailure)
1849 if self.buildconfig:
1850 d.addCallback(self._didGet)
1851 return d
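# Example (hypothetical values): with archive='lord@example.net--2005',
# version='project--devel--1.0' and revision='patch-7', the 'ver' string
# built above is
#     lord@example.net--2005/project--devel--1.0--patch-7
# i.e. baz always gets the fully-qualified ARCHIVE/VERSION form, so this
# works even when my-default-archive is not set.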
1853 def parseGotRevision(self):
1854 # using code from tryclient.BazExtractor
1855 command = [self.vcexe, "tree-id"]
1856 c = ShellCommand(self.builder, command,
1857 os.path.join(self.builder.basedir, self.srcdir),
1858 environ=self.env,
1859 sendStdout=False, sendStderr=False, sendRC=False,
1860 keepStdout=True)
1861 c.usePTY = False
1862 d = c.start()
1863 def _parse(res):
1864 tid = c.stdout.strip()
1865 slash = tid.index("/")
1866 dd = tid.rindex("--")
1867 #branch = tid[slash+1:dd]
1868 baserev = tid[dd+2:]
1869 return baserev
1870 d.addCallback(_parse)
1871 return d
1873 registerSlaveCommand("bazaar", Bazaar, command_version)
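# A standalone sketch (never called by the slave) of the slicing done in
# Arch.parseGotRevision and Bazaar.parseGotRevision above: the tree-id has
# the form ARCHIVE/BRANCH--REVISION and we want only the trailing REVISION.
# The sample tree-id below is a made-up example.
def _example_parse_tree_id(tid="lord@example.net--2005/project--devel--1.0--patch-7"):
    dd = tid.rindex("--")
    baserev = tid[dd+2:]
    return baserev  # -> "patch-7"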
1876 class Mercurial(SourceBase):
1877 """Mercurial specific VC operation. In addition to the arguments
1878 handled by SourceBase, this command reads the following keys:
1880 ['repourl'] (required): the Mercurial repository string
1881 """
1883 header = "mercurial operation"
1885 def setup(self, args):
1886 SourceBase.setup(self, args)
1887 self.vcexe = getCommand("hg")
1888 self.repourl = args['repourl']
1889 self.sourcedata = "%s\n" % self.repourl
1890 self.stdout = ""
1891 self.stderr = ""
1893 def sourcedirIsUpdateable(self):
1894 if os.path.exists(os.path.join(self.builder.basedir,
1895 self.srcdir, ".buildbot-patched")):
1896 return False
1897 # like Darcs, to check out a specific (old) revision, we have to do a
1898 # full checkout. TODO: I think 'hg pull' plus 'hg update' might work
1899 if self.revision:
1900 return False
1901 return os.path.isdir(os.path.join(self.builder.basedir,
1902 self.srcdir, ".hg"))
1904 def doVCUpdate(self):
1905 d = os.path.join(self.builder.basedir, self.srcdir)
1906 command = [self.vcexe, 'pull', '--update', '--verbose']
1907 if self.args.get('revision'):
1908 command.extend(['--rev', self.args['revision']])
1909 c = ShellCommand(self.builder, command, d,
1910 sendRC=False, timeout=self.timeout,
1911 keepStdout=True)
1912 self.command = c
1913 d = c.start()
1914 d.addCallback(self._handleEmptyUpdate)
1915 return d
1917 def _handleEmptyUpdate(self, res):
1918 if isinstance(res, int) and res == 1:
1919 if self.command.stdout.find("no changes found") != -1:
1920 # 'hg pull', when it doesn't have anything to do, exits with
1921 # rc=1, and there appears to be no way to shut this off. It
1922 # emits a distinctive message to stdout, though. So catch
1923 # this and pretend that it completed successfully.
1924 return 0
1925 return res
1927 def doVCFull(self):
1928 d = os.path.join(self.builder.basedir, self.srcdir)
1929 command = [self.vcexe, 'clone']
1930 if self.args.get('revision'):
1931 command.extend(['--rev', self.args['revision']])
1932 command.extend([self.repourl, d])
1933 c = ShellCommand(self.builder, command, self.builder.basedir,
1934 sendRC=False, timeout=self.timeout)
1935 self.command = c
1936 return c.start()
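# For illustration (hypothetical values): with repourl='http://hg.example.org/repo'
# and revision 'b6c2a59dc252', the clone above runs roughly
#     hg clone --rev b6c2a59dc252 http://hg.example.org/repo <basedir>/<srcdir>
# where 'hg' is really the path found by getCommand("hg").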
1938 def parseGotRevision(self):
1939 # we use 'hg identify' to find out what we wound up with
1940 command = [self.vcexe, "identify"]
1941 c = ShellCommand(self.builder, command,
1942 os.path.join(self.builder.basedir, self.srcdir),
1943 environ=self.env,
1944 sendStdout=False, sendStderr=False, sendRC=False,
1945 keepStdout=True)
1946 d = c.start()
1947 def _parse(res):
1948 m = re.search(r'^(\w+)', c.stdout)
1949 return m.group(1)
1950 d.addCallback(_parse)
1951 return d
1953 registerSlaveCommand("hg", Mercurial, command_version)
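# A standalone sketch (never called by the slave) of the regexp used in
# Mercurial.parseGotRevision above: 'hg identify' prints the short changeset
# id first and r'^(\w+)' grabs it. The sample output line is hypothetical.
def _example_parse_hg_identify(stdout="b6c2a59dc252 tip"):
    import re
    m = re.search(r'^(\w+)', stdout)
    return m.group(1)  # -> "b6c2a59dc252"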
1956 class P4(SourceBase):
1957 """A P4 source-updater.
1959 ['p4port'] (required): host:port for server to access
1960 ['p4user'] (optional): user to use for access
1961 ['p4passwd'] (optional): passwd to try for the user
1962 ['p4client'] (optional): client spec to use
1963 ['p4base'], ['branch'], ['p4extra_views']: define the client view to use
1964 """
1966 header = "p4"
1968 def setup(self, args):
1969 SourceBase.setup(self, args)
1970 self.p4port = args['p4port']
1971 self.p4client = args['p4client']
1972 self.p4user = args['p4user']
1973 self.p4passwd = args['p4passwd']
1974 self.p4base = args['p4base']
1975 self.p4extra_views = args['p4extra_views']
1976 self.p4mode = args['mode']
1977 self.p4branch = args['branch']
1978 self.p4logname = os.environ['LOGNAME']
1980 self.sourcedata = str([
1981 # Perforce server.
1982 self.p4port,
1984 # Client spec.
1985 self.p4client,
1987 # Depot side of view spec.
1988 self.p4base,
1989 self.p4branch,
1990 self.p4extra_views,
1992 # Local side of view spec (srcdir is made from these).
1993 self.builder.basedir,
1994 self.mode,
1995 self.workdir
1996 ])
1999 def sourcedirIsUpdateable(self):
2000 if os.path.exists(os.path.join(self.builder.basedir,
2001 self.srcdir, ".buildbot-patched")):
2002 return False
2003 # We assume our client spec is still around.
2004 # We just say we aren't updateable if the dir doesn't exist so we
2005 # don't get ENOENT checking the sourcedata.
2006 return os.path.isdir(os.path.join(self.builder.basedir,
2007 self.srcdir))
2009 def doVCUpdate(self):
2010 return self._doP4Sync(force=False)
2012 def _doP4Sync(self, force):
2013 command = ['p4']
2015 if self.p4port:
2016 command.extend(['-p', self.p4port])
2017 if self.p4user:
2018 command.extend(['-u', self.p4user])
2019 if self.p4passwd:
2020 command.extend(['-P', self.p4passwd])
2021 if self.p4client:
2022 command.extend(['-c', self.p4client])
2023 command.extend(['sync'])
2024 if force:
2025 command.extend(['-f'])
2026 if self.revision:
2027 command.extend(['@' + str(self.revision)])
2028 env = {}
2029 c = ShellCommand(self.builder, command, self.builder.basedir,
2030 environ=env, sendRC=False, timeout=self.timeout,
2031 keepStdout=True)
2032 self.command = c
2033 d = c.start()
2034 d.addCallback(self._abandonOnFailure)
2035 return d
2038 def doVCFull(self):
2039 env = {}
2040 command = ['p4']
2041 client_spec = ''
2042 client_spec += "Client: %s\n\n" % self.p4client
2043 client_spec += "Owner: %s\n\n" % self.p4logname
2044 client_spec += "Description:\n\tCreated by %s\n\n" % self.p4logname
2045 client_spec += "Root:\t%s\n\n" % self.builder.basedir
2046 client_spec += "Options:\tallwrite rmdir\n\n"
2047 client_spec += "LineEnd:\tlocal\n\n"
2049 # Set up the client view
2050 client_spec += "View:\n\t%s" % (self.p4base)
2051 if self.p4branch:
2052 client_spec += "%s/" % (self.p4branch)
2053 client_spec += "... //%s/%s/...\n" % (self.p4client, self.srcdir)
2054 if self.p4extra_views:
2055 for k, v in self.p4extra_views:
2056 client_spec += "\t%s/... //%s/%s%s/...\n" % (k, self.p4client,
2057 self.srcdir, v)
2058 if self.p4port:
2059 command.extend(['-p', self.p4port])
2060 if self.p4user:
2061 command.extend(['-u', self.p4user])
2062 if self.p4passwd:
2063 command.extend(['-P', self.p4passwd])
2064 command.extend(['client', '-i'])
2065 log.msg(client_spec)
2066 c = ShellCommand(self.builder, command, self.builder.basedir,
2067 environ=env, sendRC=False, timeout=self.timeout,
2068 initialStdin=client_spec)
2069 self.command = c
2070 d = c.start()
2071 d.addCallback(self._abandonOnFailure)
2072 d.addCallback(lambda _: self._doP4Sync(force=True))
2073 return d
2075 registerSlaveCommand("p4", P4, command_version)
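# An illustrative, standalone sketch (never called by the slave) of the
# client spec text that P4.doVCFull above feeds to 'p4 client -i' via
# initialStdin. Every value below is a made-up example; the real ones come
# from the master and from the builder's basedir (extra views omitted here).
def _example_p4_client_spec(p4client="bb-slave1", owner="buildbot",
                            basedir="/home/buildbot/slave/full",
                            p4base="//depot/project/", p4branch="trunk",
                            srcdir="build/source"):
    spec = "Client: %s\n\n" % p4client
    spec += "Owner: %s\n\n" % owner
    spec += "Description:\n\tCreated by %s\n\n" % owner
    spec += "Root:\t%s\n\n" % basedir
    spec += "Options:\tallwrite rmdir\n\n"
    spec += "LineEnd:\tlocal\n\n"
    spec += "View:\n\t%s%s/... //%s/%s/...\n" % (p4base, p4branch,
                                                 p4client, srcdir)
    return spec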
2078 class P4Sync(SourceBase):
2079 """A partial P4 source-updater. Requires manual setup of a per-slave P4
2080 environment. The only thing that must come from the master is P4PORT.
2081 'mode' is required to be 'copy'.
2083 ['p4port'] (required): host:port for server to access
2084 ['p4user'] (optional): user to use for access
2085 ['p4passwd'] (optional): passwd to try for the user
2086 ['p4client'] (optional): client spec to use
2087 """
2089 header = "p4 sync"
2091 def setup(self, args):
2092 SourceBase.setup(self, args)
2093 self.vcexe = getCommand("p4")
2094 self.p4port = args['p4port']
2095 self.p4user = args['p4user']
2096 self.p4passwd = args['p4passwd']
2097 self.p4client = args['p4client']
2099 def sourcedirIsUpdateable(self):
2100 return True
2102 def _doVC(self, force):
2103 d = os.path.join(self.builder.basedir, self.srcdir)
2104 command = [self.vcexe]
2105 if self.p4port:
2106 command.extend(['-p', self.p4port])
2107 if self.p4user:
2108 command.extend(['-u', self.p4user])
2109 if self.p4passwd:
2110 command.extend(['-P', self.p4passwd])
2111 if self.p4client:
2112 command.extend(['-c', self.p4client])
2113 command.extend(['sync'])
2114 if force:
2115 command.extend(['-f'])
2116 if self.revision:
2117 command.extend(['@' + str(self.revision)])
2118 env = {}
2119 c = ShellCommand(self.builder, command, d, environ=env,
2120 sendRC=False, timeout=self.timeout)
2121 self.command = c
2122 return c.start()
2124 def doVCUpdate(self):
2125 return self._doVC(force=False)
2127 def doVCFull(self):
2128 return self._doVC(force=True)
2130 registerSlaveCommand("p4sync", P4Sync, command_version)
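# For illustration (hypothetical values): with p4port='perforce.example.com:1666',
# p4user='buildbot', p4client='bb-slave1', no password, and revision=1234,
# P4Sync._doVC(force=True) above assembles an argv along the lines of
#     ['p4', '-p', 'perforce.example.com:1666', '-u', 'buildbot',
#      '-c', 'bb-slave1', 'sync', '-f', '@1234']
# (where 'p4' is really the absolute path returned by getCommand("p4")),
# and runs it in the source directory; flags for unset settings are omitted.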