reconfig no longer interrupts builds, nor does it disconnect/reconnect slaves
[buildbot.git] / buildbot / slave / commands.py
blob6b3bc7b56abdeff847ad7293ecc69e60bf90a7ca
1 # -*- test-case-name: buildbot.test.test_slavecommand -*-
3 import os, re, signal, shutil, types, time
4 from stat import ST_CTIME, ST_MTIME, ST_SIZE
6 from twisted.internet.protocol import ProcessProtocol
7 from twisted.internet import reactor, defer, task
8 from twisted.python import log, failure, runtime
10 from buildbot.twcompat import implements, which
11 from buildbot.slave.interfaces import ISlaveCommand
12 from buildbot.slave.registry import registerSlaveCommand
14 # this used to be a CVS $-style "Revision" auto-updated keyword, but since I
15 # moved to Darcs as the primary repository, this is updated manually each
16 # time this file is changed. The last cvs_ver that was here was 1.51 .
17 command_version = "2.2"
19 # version history:
20 # >=1.17: commands are interruptable
21 # >=1.28: Arch understands 'revision', added Bazaar
22 # >=1.33: Source classes understand 'retry'
23 # >=1.39: Source classes correctly handle changes in branch (except Git)
24 # Darcs accepts 'revision' (now all do but Git) (well, and P4Sync)
25 # Arch/Baz should accept 'build-config'
26 # >=1.51: (release 0.7.3)
27 # >= 2.1: SlaveShellCommand now accepts 'initial_stdin', 'keep_stdin_open',
28 # and 'logfiles'. It now sends 'log' messages in addition to
29 # stdout/stdin/header/rc. It acquired writeStdin/closeStdin methods,
30 # but these are not remotely callable yet.
31 # (not externally visible: ShellCommandPP has writeStdin/closeStdin.
32 # ShellCommand accepts new arguments (logfiles=, initialStdin=,
33 # keepStdinOpen=) and no longer accepts stdin=)
34 # (release 0.7.4)
35 # >= 2.2: added monotone, uploadFile, and downloadFile
37 class CommandInterrupted(Exception):
38 pass
39 class TimeoutError(Exception):
40 pass
42 class AbandonChain(Exception):
43 """A series of chained steps can raise this exception to indicate that
44 one of the intermediate ShellCommands has failed, such that there is no
45 point in running the remainder. 'rc' should be the non-zero exit code of
46 the failing ShellCommand."""
48 def __repr__(self):
49 return "<AbandonChain rc=%s>" % self.args[0]
51 def getCommand(name):
52 possibles = which(name)
53 if not possibles:
54 raise RuntimeError("Couldn't find executable for '%s'" % name)
55 return possibles[0]
57 def rmdirRecursive(dir):
58 """This is a replacement for shutil.rmtree that works better under
59 windows. Thanks to Bear at the OSAF for the code."""
60 if not os.path.exists(dir):
61 return
63 if os.path.islink(dir):
64 os.remove(dir)
65 return
67 for name in os.listdir(dir):
68 full_name = os.path.join(dir, name)
69 # on Windows, if we don't have write permission we can't remove
70 # the file/directory either, so turn that on
71 if os.name == 'nt':
72 if not os.access(full_name, os.W_OK):
73 os.chmod(full_name, 0600)
74 if os.path.isdir(full_name):
75 rmdirRecursive(full_name)
76 else:
77 # print "removing file", full_name
78 os.remove(full_name)
79 os.rmdir(dir)
81 class ShellCommandPP(ProcessProtocol):
82 debug = False
84 def __init__(self, command):
85 self.command = command
86 self.pending_stdin = ""
87 self.stdin_finished = False
89 def writeStdin(self, data):
90 assert not self.stdin_finished
91 if self.connected:
92 self.transport.write(data)
93 else:
94 self.pending_stdin += data
96 def closeStdin(self):
97 if self.connected:
98 if self.debug: log.msg(" closing stdin")
99 self.transport.closeStdin()
100 self.stdin_finished = True
102 def connectionMade(self):
103 if self.debug:
104 log.msg("ShellCommandPP.connectionMade")
105 if not self.command.process:
106 if self.debug:
107 log.msg(" assigning self.command.process: %s" %
108 (self.transport,))
109 self.command.process = self.transport
111 # TODO: maybe we shouldn't close stdin when using a PTY. I can't test
112 # this yet, recent debian glibc has a bug which causes thread-using
113 # test cases to SIGHUP trial, and the workaround is to either run
114 # the whole test with /bin/sh -c " ".join(argv) (way gross) or to
115 # not use a PTY. Once the bug is fixed, I'll be able to test what
116 # happens when you close stdin on a pty. My concern is that it will
117 # SIGHUP the child (since we are, in a sense, hanging up on them).
118 # But it may well be that keeping stdout open prevents the SIGHUP
119 # from being sent.
120 #if not self.command.usePTY:
122 if self.pending_stdin:
123 if self.debug: log.msg(" writing to stdin")
124 self.transport.write(self.pending_stdin)
125 if self.stdin_finished:
126 if self.debug: log.msg(" closing stdin")
127 self.transport.closeStdin()
129 def outReceived(self, data):
130 if self.debug:
131 log.msg("ShellCommandPP.outReceived")
132 self.command.addStdout(data)
134 def errReceived(self, data):
135 if self.debug:
136 log.msg("ShellCommandPP.errReceived")
137 self.command.addStderr(data)
139 def processEnded(self, status_object):
140 if self.debug:
141 log.msg("ShellCommandPP.processEnded", status_object)
142 # status_object is a Failure wrapped around an
143 # error.ProcessTerminated or and error.ProcessDone.
144 # requires twisted >= 1.0.4 to overcome a bug in process.py
145 sig = status_object.value.signal
146 rc = status_object.value.exitCode
147 self.command.finished(sig, rc)
149 class LogFileWatcher:
150 POLL_INTERVAL = 2
152 def __init__(self, command, name, logfile):
153 self.command = command
154 self.name = name
155 self.logfile = logfile
156 log.msg("LogFileWatcher created to watch %s" % logfile)
157 # we are created before the ShellCommand starts. If the logfile we're
158 # supposed to be watching already exists, record its size and
159 # ctime/mtime so we can tell when it starts to change.
160 self.old_logfile_stats = self.statFile()
161 self.started = False
163 # every 2 seconds we check on the file again
164 self.poller = task.LoopingCall(self.poll)
166 def start(self):
167 self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)
169 def _cleanupPoll(self, err):
170 log.err(err, msg="Polling error")
171 self.poller = None
173 def stop(self):
174 self.poll()
175 if self.poller is not None:
176 self.poller.stop()
177 if self.started:
178 self.f.close()
180 def statFile(self):
181 if os.path.exists(self.logfile):
182 s = os.stat(self.logfile)
183 return (s[ST_CTIME], s[ST_MTIME], s[ST_SIZE])
184 return None
186 def poll(self):
187 if not self.started:
188 s = self.statFile()
189 if s == self.old_logfile_stats:
190 return # not started yet
191 if not s:
192 # the file was there, but now it's deleted. Forget about the
193 # initial state, clearly the process has deleted the logfile
194 # in preparation for creating a new one.
195 self.old_logfile_stats = None
196 return # no file to work with
197 self.f = open(self.logfile, "rb")
198 self.started = True
199 self.f.seek(self.f.tell(), 0)
200 while True:
201 data = self.f.read(10000)
202 if not data:
203 return
204 self.command.addLogfile(self.name, data)
207 class ShellCommand:
208 # This is a helper class, used by SlaveCommands to run programs in a
209 # child shell.
211 notreally = False
212 BACKUP_TIMEOUT = 5
213 KILL = "KILL"
215 def __init__(self, builder, command,
216 workdir, environ=None,
217 sendStdout=True, sendStderr=True, sendRC=True,
218 timeout=None, initialStdin=None, keepStdinOpen=False,
219 keepStdout=False,
220 logfiles={}):
223 @param keepStdout: if True, we keep a copy of all the stdout text
224 that we've seen. This copy is available in
225 self.stdout, which can be read after the command
226 has finished.
230 self.builder = builder
231 self.command = command
232 self.sendStdout = sendStdout
233 self.sendStderr = sendStderr
234 self.sendRC = sendRC
235 self.logfiles = logfiles
236 self.workdir = workdir
237 self.environ = os.environ.copy()
238 if environ:
239 if environ.has_key('PYTHONPATH'):
240 ppath = environ['PYTHONPATH']
241 # Need to do os.pathsep translation. We could either do that
242 # by replacing all incoming ':'s with os.pathsep, or by
243 # accepting lists. I like lists better.
244 if not isinstance(ppath, str):
245 # If it's not a string, treat it as a sequence to be
246 # turned in to a string.
247 ppath = os.pathsep.join(ppath)
249 if self.environ.has_key('PYTHONPATH'):
250 # special case, prepend the builder's items to the
251 # existing ones. This will break if you send over empty
252 # strings, so don't do that.
253 ppath = ppath + os.pathsep + self.environ['PYTHONPATH']
255 environ['PYTHONPATH'] = ppath
257 self.environ.update(environ)
258 self.initialStdin = initialStdin
259 self.keepStdinOpen = keepStdinOpen
260 self.timeout = timeout
261 self.timer = None
262 self.keepStdout = keepStdout
264 # usePTY=True is a convenience for cleaning up all children and
265 # grandchildren of a hung command. Fall back to usePTY=False on
266 # systems where ptys cause problems.
268 self.usePTY = self.builder.usePTY
269 if runtime.platformType != "posix":
270 self.usePTY = False # PTYs are posix-only
271 if initialStdin is not None:
272 # for .closeStdin to matter, we must use a pipe, not a PTY
273 self.usePTY = False
275 self.logFileWatchers = []
276 for name,filename in self.logfiles.items():
277 w = LogFileWatcher(self, name,
278 os.path.join(self.workdir, filename))
279 self.logFileWatchers.append(w)
281 def __repr__(self):
282 return "<slavecommand.ShellCommand '%s'>" % self.command
284 def sendStatus(self, status):
285 self.builder.sendUpdate(status)
287 def start(self):
288 # return a Deferred which fires (with the exit code) when the command
289 # completes
290 if self.keepStdout:
291 self.stdout = ""
292 self.deferred = defer.Deferred()
293 try:
294 self._startCommand()
295 except:
296 log.msg("error in ShellCommand._startCommand")
297 log.err()
298 # pretend it was a shell error
299 self.deferred.errback(AbandonChain(-1))
300 return self.deferred
302 def _startCommand(self):
303 log.msg("ShellCommand._startCommand")
304 if self.notreally:
305 self.sendStatus({'header': "command '%s' in dir %s" % \
306 (self.command, self.workdir)})
307 self.sendStatus({'header': "(not really)\n"})
308 self.finished(None, 0)
309 return
311 self.pp = ShellCommandPP(self)
313 if type(self.command) in types.StringTypes:
314 if runtime.platformType == 'win32':
315 argv = [os.environ['COMSPEC'], '/c', self.command]
316 else:
317 # for posix, use /bin/sh. for other non-posix, well, doesn't
318 # hurt to try
319 argv = ['/bin/sh', '-c', self.command]
320 else:
321 if runtime.platformType == 'win32':
322 argv = [os.environ['COMSPEC'], '/c'] + list(self.command)
323 else:
324 argv = self.command
326 # self.stdin is handled in ShellCommandPP.connectionMade
328 # first header line is the command in plain text, argv joined with
329 # spaces. You should be able to cut-and-paste this into a shell to
330 # obtain the same results. If there are spaces in the arguments, too
331 # bad.
332 msg = " ".join(argv)
333 log.msg(" " + msg)
334 self.sendStatus({'header': msg+"\n"})
336 # then comes the secondary information
337 msg = " in dir %s" % (self.workdir,)
338 if self.timeout:
339 msg += " (timeout %d secs)" % (self.timeout,)
340 log.msg(" " + msg)
341 self.sendStatus({'header': msg+"\n"})
343 msg = " watching logfiles %s" % (self.logfiles,)
344 log.msg(" " + msg)
345 self.sendStatus({'header': msg+"\n"})
347 # then the argv array for resolving unambiguity
348 msg = " argv: %s" % (argv,)
349 log.msg(" " + msg)
350 self.sendStatus({'header': msg+"\n"})
352 # then the environment, since it sometimes causes problems
353 msg = " environment: %s" % (self.environ,)
354 log.msg(" " + msg)
355 self.sendStatus({'header': msg+"\n"})
357 # this will be buffered until connectionMade is called
358 if self.initialStdin:
359 self.pp.writeStdin(self.initialStdin)
360 if not self.keepStdinOpen:
361 self.pp.closeStdin()
363 # win32eventreactor's spawnProcess (under twisted <= 2.0.1) returns
364 # None, as opposed to all the posixbase-derived reactors (which
365 # return the new Process object). This is a nuisance. We can make up
366 # for it by having the ProcessProtocol give us their .transport
367 # attribute after they get one. I'd prefer to get it from
368 # spawnProcess because I'm concerned about returning from this method
369 # without having a valid self.process to work with. (if kill() were
370 # called right after we return, but somehow before connectionMade
371 # were called, then kill() would blow up).
372 self.process = None
373 p = reactor.spawnProcess(self.pp, argv[0], argv,
374 self.environ,
375 self.workdir,
376 usePTY=self.usePTY)
377 # connectionMade might have been called during spawnProcess
378 if not self.process:
379 self.process = p
381 # connectionMade also closes stdin as long as we're not using a PTY.
382 # This is intended to kill off inappropriately interactive commands
383 # better than the (long) hung-command timeout. ProcessPTY should be
384 # enhanced to allow the same childFDs argument that Process takes,
385 # which would let us connect stdin to /dev/null .
387 if self.timeout:
388 self.timer = reactor.callLater(self.timeout, self.doTimeout)
390 for w in self.logFileWatchers:
391 w.start()
394 def addStdout(self, data):
395 if self.sendStdout:
396 self.sendStatus({'stdout': data})
397 if self.keepStdout:
398 self.stdout += data
399 if self.timer:
400 self.timer.reset(self.timeout)
402 def addStderr(self, data):
403 if self.sendStderr:
404 self.sendStatus({'stderr': data})
405 if self.timer:
406 self.timer.reset(self.timeout)
408 def addLogfile(self, name, data):
409 self.sendStatus({'log': (name, data)})
410 if self.timer:
411 self.timer.reset(self.timeout)
413 def finished(self, sig, rc):
414 log.msg("command finished with signal %s, exit code %s" % (sig,rc))
415 for w in self.logFileWatchers:
416 # this will send the final updates
417 w.stop()
418 if sig is not None:
419 rc = -1
420 if self.sendRC:
421 if sig is not None:
422 self.sendStatus(
423 {'header': "process killed by signal %d\n" % sig})
424 self.sendStatus({'rc': rc})
425 if self.timer:
426 self.timer.cancel()
427 self.timer = None
428 d = self.deferred
429 self.deferred = None
430 if d:
431 d.callback(rc)
432 else:
433 log.msg("Hey, command %s finished twice" % self)
435 def failed(self, why):
436 log.msg("ShellCommand.failed: command failed: %s" % (why,))
437 if self.timer:
438 self.timer.cancel()
439 self.timer = None
440 d = self.deferred
441 self.deferred = None
442 if d:
443 d.errback(why)
444 else:
445 log.msg("Hey, command %s finished twice" % self)
447 def doTimeout(self):
448 self.timer = None
449 msg = "command timed out: %d seconds without output" % self.timeout
450 self.kill(msg)
452 def kill(self, msg):
453 # This may be called by the timeout, or when the user has decided to
454 # abort this build.
455 if self.timer:
456 self.timer.cancel()
457 self.timer = None
458 if hasattr(self.process, "pid"):
459 msg += ", killing pid %d" % self.process.pid
460 log.msg(msg)
461 self.sendStatus({'header': "\n" + msg + "\n"})
463 hit = 0
464 if runtime.platformType == "posix":
465 try:
466 # really want to kill off all child processes too. Process
467 # Groups are ideal for this, but that requires
468 # spawnProcess(usePTY=1). Try both ways in case process was
469 # not started that way.
471 # the test suite sets self.KILL=None to tell us we should
472 # only pretend to kill the child. This lets us test the
473 # backup timer.
475 sig = None
476 if self.KILL is not None:
477 sig = getattr(signal, "SIG"+ self.KILL, None)
479 if self.KILL == None:
480 log.msg("self.KILL==None, only pretending to kill child")
481 elif sig is None:
482 log.msg("signal module is missing SIG%s" % self.KILL)
483 elif not hasattr(os, "kill"):
484 log.msg("os module is missing the 'kill' function")
485 else:
486 log.msg("trying os.kill(-pid, %d)" % (sig,))
487 # TODO: maybe use os.killpg instead of a negative pid?
488 os.kill(-self.process.pid, sig)
489 log.msg(" signal %s sent successfully" % sig)
490 hit = 1
491 except OSError:
492 # probably no-such-process, maybe because there is no process
493 # group
494 pass
495 if not hit:
496 try:
497 if self.KILL is None:
498 log.msg("self.KILL==None, only pretending to kill child")
499 else:
500 log.msg("trying process.signalProcess('KILL')")
501 self.process.signalProcess(self.KILL)
502 log.msg(" signal %s sent successfully" % (self.KILL,))
503 hit = 1
504 except OSError:
505 # could be no-such-process, because they finished very recently
506 pass
507 if not hit:
508 log.msg("signalProcess/os.kill failed both times")
510 if runtime.platformType == "posix":
511 # we only do this under posix because the win32eventreactor
512 # blocks here until the process has terminated, while closing
513 # stderr. This is weird.
514 self.pp.transport.loseConnection()
516 # finished ought to be called momentarily. Just in case it doesn't,
517 # set a timer which will abandon the command.
518 self.timer = reactor.callLater(self.BACKUP_TIMEOUT,
519 self.doBackupTimeout)
521 def doBackupTimeout(self):
522 log.msg("we tried to kill the process, and it wouldn't die.."
523 " finish anyway")
524 self.timer = None
525 self.sendStatus({'header': "SIGKILL failed to kill process\n"})
526 if self.sendRC:
527 self.sendStatus({'header': "using fake rc=-1\n"})
528 self.sendStatus({'rc': -1})
529 self.failed(TimeoutError("SIGKILL failed to kill process"))
532 def writeStdin(self, data):
533 self.pp.writeStdin(data)
535 def closeStdin(self):
536 self.pp.closeStdin()
539 class Command:
540 if implements:
541 implements(ISlaveCommand)
542 else:
543 __implements__ = ISlaveCommand
545 """This class defines one command that can be invoked by the build master.
546 The command is executed on the slave side, and always sends back a
547 completion message when it finishes. It may also send intermediate status
548 as it runs (by calling builder.sendStatus). Some commands can be
549 interrupted (either by the build master or a local timeout), in which
550 case the step is expected to complete normally with a status message that
551 indicates an error occurred.
553 These commands are used by BuildSteps on the master side. Each kind of
554 BuildStep uses a single Command. The slave must implement all the
555 Commands required by the set of BuildSteps used for any given build:
556 this is checked at startup time.
558 All Commands are constructed with the same signature:
559 c = CommandClass(builder, args)
560 where 'builder' is the parent SlaveBuilder object, and 'args' is a
561 dict that is interpreted per-command.
563 The setup(args) method is available for setup, and is run from __init__.
565 The Command is started with start(). This method must be implemented in a
566 subclass, and it should return a Deferred. When your step is done, you
567 should fire the Deferred (the results are not used). If the command is
568 interrupted, it should fire the Deferred anyway.
570 While the command runs. it may send status messages back to the
571 buildmaster by calling self.sendStatus(statusdict). The statusdict is
572 interpreted by the master-side BuildStep however it likes.
574 A separate completion message is sent when the deferred fires, which
575 indicates that the Command has finished, but does not carry any status
576 data. If the Command needs to return an exit code of some sort, that
577 should be sent as a regular status message before the deferred is fired .
578 Once builder.commandComplete has been run, no more status messages may be
579 sent.
581 If interrupt() is called, the Command should attempt to shut down as
582 quickly as possible. Child processes should be killed, new ones should
583 not be started. The Command should send some kind of error status update,
584 then complete as usual by firing the Deferred.
586 .interrupted should be set by interrupt(), and can be tested to avoid
587 sending multiple error status messages.
589 If .running is False, the bot is shutting down (or has otherwise lost the
590 connection to the master), and should not send any status messages. This
591 is checked in Command.sendStatus .
595 # builder methods:
596 # sendStatus(dict) (zero or more)
597 # commandComplete() or commandInterrupted() (one, at end)
599 debug = False
600 interrupted = False
601 running = False # set by Builder, cleared on shutdown or when the
602 # Deferred fires
604 def __init__(self, builder, stepId, args):
605 self.builder = builder
606 self.stepId = stepId # just for logging
607 self.args = args
608 self.setup(args)
610 def setup(self, args):
611 """Override this in a subclass to extract items from the args dict."""
612 pass
614 def doStart(self):
615 self.running = True
616 d = defer.maybeDeferred(self.start)
617 d.addBoth(self.commandComplete)
618 return d
620 def start(self):
621 """Start the command. This method should return a Deferred that will
622 fire when the command has completed. The Deferred's argument will be
623 ignored.
625 This method should be overridden by subclasses."""
626 raise NotImplementedError, "You must implement this in a subclass"
628 def sendStatus(self, status):
629 """Send a status update to the master."""
630 if self.debug:
631 log.msg("sendStatus", status)
632 if not self.running:
633 log.msg("would sendStatus but not .running")
634 return
635 self.builder.sendUpdate(status)
637 def doInterrupt(self):
638 self.running = False
639 self.interrupt()
641 def interrupt(self):
642 """Override this in a subclass to allow commands to be interrupted.
643 May be called multiple times, test and set self.interrupted=True if
644 this matters."""
645 pass
647 def commandComplete(self, res):
648 self.running = False
649 return res
651 # utility methods, mostly used by SlaveShellCommand and the like
653 def _abandonOnFailure(self, rc):
654 if type(rc) is not int:
655 log.msg("weird, _abandonOnFailure was given rc=%s (%s)" % \
656 (rc, type(rc)))
657 assert isinstance(rc, int)
658 if rc != 0:
659 raise AbandonChain(rc)
660 return rc
662 def _sendRC(self, res):
663 self.sendStatus({'rc': 0})
665 def _checkAbandoned(self, why):
666 log.msg("_checkAbandoned", why)
667 why.trap(AbandonChain)
668 log.msg(" abandoning chain", why.value)
669 self.sendStatus({'rc': why.value.args[0]})
670 return None
674 class SlaveFileUploadCommand(Command):
676 Upload a file from slave to build master
677 Arguments:
679 - ['workdir']: base directory to use
680 - ['slavesrc']: name of the slave-side file to read from
681 - ['writer']: RemoteReference to a transfer._FileWriter object
682 - ['maxsize']: max size (in bytes) of file to write
683 - ['blocksize']: max size for each data block
685 debug = False
687 def setup(self,args):
688 self.workdir = args['workdir']
689 self.filename = os.path.basename(args['slavesrc'])
690 self.writer = args['writer']
691 self.maxsize = args['maxsize']
692 self.blocksize = args['blocksize']
693 self.stderr = None
694 self.rc = 0
696 if self.debug:
697 log.msg('SlaveFileUploadCommand started')
699 # Open file
700 self.path = os.path.join(self.builder.basedir,
701 self.workdir,
702 self.filename)
703 try:
704 self.fp = open(self.path, 'r')
705 if self.debug:
706 log.msg('Opened %r for upload' % self.path)
707 except:
708 self.fp = None
709 self.stderr = 'Cannot open file %r for upload' % self.path
710 self.rc = 1
711 if self.debug:
712 log.msg('Cannot open file %r for upload' % self.path)
715 def start(self):
716 self.cmd = defer.Deferred()
717 reactor.callLater(0, self._writeBlock)
719 return self.cmd
721 def _writeBlock(self):
723 Write a block of data to the remote writer
725 if self.interrupted or self.fp is None:
726 if self.debug:
727 log.msg('SlaveFileUploadCommand._writeBlock(): end')
728 d = self.writer.callRemote('close')
729 d.addCallback(lambda _: self.finished())
730 return
732 length = self.blocksize
733 if self.maxsize is not None and length > self.maxsize:
734 length = self.maxsize
736 if length <= 0:
737 if self.stderr is None:
738 self.stderr = 'Maximum filesize reached, truncating file %r' \
739 % self.path
740 self.rc = 1
741 data = ''
742 else:
743 data = self.fp.read(length)
745 if self.debug:
746 log.msg('SlaveFileUploadCommand._writeBlock(): '+
747 'allowed=%d readlen=%d' % (length,len(data)))
748 if len(data) == 0:
749 d = self.writer.callRemote('close')
750 d.addCallback(lambda _: self.finished())
751 else:
752 if self.maxsize is not None:
753 self.maxsize = self.maxsize - len(data)
754 assert self.maxsize >= 0
755 d = self.writer.callRemote('write',data)
756 d.addCallback(lambda _: self._writeBlock())
759 def interrupt(self):
760 if self.debug:
761 log.msg('interrupted')
762 if self.interrupted:
763 return
764 if self.stderr is None:
765 self.stderr = 'Upload of %r interrupted' % self.path
766 self.rc = 1
767 self.interrupted = True
768 self.finished()
771 def finished(self):
772 if self.debug:
773 log.msg('finished: stderr=%r, rc=%r' % (self.stderr,self.rc))
774 if self.stderr is None:
775 self.sendStatus({'rc':self.rc})
776 else:
777 self.sendStatus({'stderr':self.stderr, 'rc':self.rc})
778 self.cmd.callback(0)
780 registerSlaveCommand("uploadFile", SlaveFileUploadCommand, command_version)
783 class SlaveFileDownloadCommand(Command):
785 Download a file from master to slave
786 Arguments:
788 - ['workdir']: base directory to use
789 - ['slavedest']: name of the slave-side file to be created
790 - ['reader']: RemoteReference to a transfer._FileReader object
791 - ['maxsize']: max size (in bytes) of file to write
792 - ['blocksize']: max size for each data block
794 debug = False
796 def setup(self,args):
797 self.workdir = args['workdir']
798 self.filename = os.path.basename(args['slavedest'])
799 self.reader = args['reader']
800 self.maxsize = args['maxsize']
801 self.blocksize = args['blocksize']
802 self.stderr = None
803 self.rc = 0
805 if self.debug:
806 log.msg('SlaveFileDownloadCommand started')
808 # Open file
809 self.path = os.path.join(self.builder.basedir,
810 self.workdir,
811 self.filename)
812 try:
813 self.fp = open(self.path, 'w')
814 if self.debug:
815 log.msg('Opened %r for download' % self.path)
816 except IOError:
817 self.fp = None
818 self.stderr = 'Cannot open file %r for download' % self.path
819 self.rc = 1
820 if self.debug:
821 log.msg('Cannot open file %r for download' % self.path)
824 def start(self):
825 self.cmd = defer.Deferred()
826 reactor.callLater(0, self._readBlock)
828 return self.cmd
830 def _readBlock(self):
832 Read a block of data from the remote reader
834 if self.interrupted or self.fp is None:
835 if self.debug:
836 log.msg('SlaveFileDownloadCommand._readBlock(): end')
837 d = self.reader.callRemote('close')
838 d.addCallback(lambda _: self.finished())
839 return
841 length = self.blocksize
842 if self.maxsize is not None and length > self.maxsize:
843 length = self.maxsize
845 if length <= 0:
846 if self.stderr is None:
847 self.stderr = 'Maximum filesize reached, truncating file %r' \
848 % self.path
849 self.rc = 1
850 d = self.reader.callRemote('close')
851 d.addCallback(lambda _: self.finished())
852 else:
853 d = self.reader.callRemote('read', length)
854 d.addCallback(self._writeData)
856 def _writeData(self,data):
857 if self.debug:
858 log.msg('SlaveFileDownloadCommand._readBlock(): '+
859 'readlen=%d' % len(data))
860 if len(data) == 0:
861 d = self.reader.callRemote('close')
862 d.addCallback(lambda _: self.finished())
863 else:
864 if self.maxsize is not None:
865 self.maxsize = self.maxsize - len(data)
866 assert self.maxsize >= 0
867 self.fp.write(data)
868 self._readBlock() # setup call back for next block (or finish)
871 def interrupt(self):
872 if self.debug:
873 log.msg('interrupted')
874 if self.interrupted:
875 return
876 if self.stderr is None:
877 self.stderr = 'Download of %r interrupted' % self.path
878 self.rc = 1
879 self.interrupted = True
880 self.finished()
883 def finished(self):
884 if self.fp is not None:
885 self.fp.close()
887 if self.debug:
888 log.msg('finished: stderr=%r, rc=%r' % (self.stderr,self.rc))
889 if self.stderr is None:
890 self.sendStatus({'rc':self.rc})
891 else:
892 self.sendStatus({'stderr':self.stderr, 'rc':self.rc})
893 self.cmd.callback(0)
896 registerSlaveCommand("downloadFile", SlaveFileDownloadCommand, command_version)
900 class SlaveShellCommand(Command):
901 """This is a Command which runs a shell command. The args dict contains
902 the following keys:
904 - ['command'] (required): a shell command to run. If this is a string,
905 it will be run with /bin/sh (['/bin/sh',
906 '-c', command]). If it is a list
907 (preferred), it will be used directly.
908 - ['workdir'] (required): subdirectory in which the command will be
909 run, relative to the builder dir
910 - ['env']: a dict of environment variables to augment/replace
911 os.environ . PYTHONPATH is treated specially, and
912 should be a list of path components to be prepended to
913 any existing PYTHONPATH environment variable.
914 - ['initial_stdin']: a string which will be written to the command's
915 stdin as soon as it starts
916 - ['keep_stdin_open']: unless True, the command's stdin will be
917 closed as soon as initial_stdin has been
918 written. Set this to True if you plan to write
919 to stdin after the command has been started.
920 - ['want_stdout']: 0 if stdout should be thrown away
921 - ['want_stderr']: 0 if stderr should be thrown away
922 - ['not_really']: 1 to skip execution and return rc=0
923 - ['timeout']: seconds of silence to tolerate before killing command
924 - ['logfiles']: dict mapping LogFile name to the workdir-relative
925 filename of a local log file. This local file will be
926 watched just like 'tail -f', and all changes will be
927 written to 'log' status updates.
929 ShellCommand creates the following status messages:
930 - {'stdout': data} : when stdout data is available
931 - {'stderr': data} : when stderr data is available
932 - {'header': data} : when headers (command start/stop) are available
933 - {'log': (logfile_name, data)} : when log files have new contents
934 - {'rc': rc} : when the process has terminated
937 def start(self):
938 args = self.args
939 # args['workdir'] is relative to Builder directory, and is required.
940 assert args['workdir'] is not None
941 workdir = os.path.join(self.builder.basedir, args['workdir'])
943 c = ShellCommand(self.builder, args['command'],
944 workdir, environ=args.get('env'),
945 timeout=args.get('timeout', None),
946 sendStdout=args.get('want_stdout', True),
947 sendStderr=args.get('want_stderr', True),
948 sendRC=True,
949 initialStdin=args.get('initial_stdin'),
950 keepStdinOpen=args.get('keep_stdin_open'),
951 logfiles=args.get('logfiles', {}),
953 self.command = c
954 d = self.command.start()
955 return d
957 def interrupt(self):
958 self.interrupted = True
959 self.command.kill("command interrupted")
961 def writeStdin(self, data):
962 self.command.writeStdin(data)
964 def closeStdin(self):
965 self.command.closeStdin()
967 registerSlaveCommand("shell", SlaveShellCommand, command_version)
970 class DummyCommand(Command):
972 I am a dummy no-op command that by default takes 5 seconds to complete.
973 See L{buildbot.steps.dummy.RemoteDummy}
976 def start(self):
977 self.d = defer.Deferred()
978 log.msg(" starting dummy command [%s]" % self.stepId)
979 self.timer = reactor.callLater(1, self.doStatus)
980 return self.d
982 def interrupt(self):
983 if self.interrupted:
984 return
985 self.timer.cancel()
986 self.timer = None
987 self.interrupted = True
988 self.finished()
990 def doStatus(self):
991 log.msg(" sending intermediate status")
992 self.sendStatus({'stdout': 'data'})
993 timeout = self.args.get('timeout', 5) + 1
994 self.timer = reactor.callLater(timeout - 1, self.finished)
996 def finished(self):
997 log.msg(" dummy command finished [%s]" % self.stepId)
998 if self.interrupted:
999 self.sendStatus({'rc': 1})
1000 else:
1001 self.sendStatus({'rc': 0})
1002 self.d.callback(0)
1004 registerSlaveCommand("dummy", DummyCommand, command_version)
1007 # this maps handle names to a callable. When the WaitCommand starts, this
1008 # callable is invoked with no arguments. It should return a Deferred. When
1009 # that Deferred fires, our WaitCommand will finish.
1010 waitCommandRegistry = {}
1012 class WaitCommand(Command):
1014 I am a dummy command used by the buildbot unit test suite. I want for the
1015 unit test to tell us to finish. See L{buildbot.steps.dummy.Wait}
1018 def start(self):
1019 self.d = defer.Deferred()
1020 log.msg(" starting wait command [%s]" % self.stepId)
1021 handle = self.args['handle']
1022 cb = waitCommandRegistry[handle]
1023 del waitCommandRegistry[handle]
1024 def _called():
1025 log.msg(" wait-%s starting" % (handle,))
1026 d = cb()
1027 def _done(res):
1028 log.msg(" wait-%s finishing: %s" % (handle, res))
1029 return res
1030 d.addBoth(_done)
1031 d.addCallbacks(self.finished, self.failed)
1032 reactor.callLater(0, _called)
1033 return self.d
1035 def interrupt(self):
1036 log.msg(" wait command interrupted")
1037 if self.interrupted:
1038 return
1039 self.interrupted = True
1040 self.finished("interrupted")
1042 def finished(self, res):
1043 log.msg(" wait command finished [%s]" % self.stepId)
1044 if self.interrupted:
1045 self.sendStatus({'rc': 2})
1046 else:
1047 self.sendStatus({'rc': 0})
1048 self.d.callback(0)
1049 def failed(self, why):
1050 log.msg(" wait command failed [%s]" % self.stepId)
1051 self.sendStatus({'rc': 1})
1052 self.d.callback(0)
1054 registerSlaveCommand("dummy.wait", WaitCommand, command_version)
1057 class SourceBase(Command):
1058 """Abstract base class for Version Control System operations (checkout
1059 and update). This class extracts the following arguments from the
1060 dictionary received from the master:
1062 - ['workdir']: (required) the subdirectory where the buildable sources
1063 should be placed
1065 - ['mode']: one of update/copy/clobber/export, defaults to 'update'
1067 - ['revision']: If not None, this is an int or string which indicates
1068 which sources (along a time-like axis) should be used.
1069 It is the thing you provide as the CVS -r or -D
1070 argument.
1072 - ['patch']: If not None, this is a tuple of (striplevel, patch)
1073 which contains a patch that should be applied after the
1074 checkout has occurred. Once applied, the tree is no
1075 longer eligible for use with mode='update', and it only
1076 makes sense to use this in conjunction with a
1077 ['revision'] argument. striplevel is an int, and patch
1078 is a string in standard unified diff format. The patch
1079 will be applied with 'patch -p%d <PATCH', with
1080 STRIPLEVEL substituted as %d. The command will fail if
1081 the patch process fails (rejected hunks).
1083 - ['timeout']: seconds of silence tolerated before we kill off the
1084 command
1086 - ['retry']: If not None, this is a tuple of (delay, repeats)
1087 which means that any failed VC updates should be
1088 reattempted, up to REPEATS times, after a delay of
1089 DELAY seconds. This is intended to deal with slaves
1090 that experience transient network failures.
1093 sourcedata = ""
1095 def setup(self, args):
1096 # if we need to parse the output, use this environment. Otherwise
1097 # command output will be in whatever the buildslave's native language
1098 # has been set to.
1099 self.env = os.environ.copy()
1100 self.env['LC_ALL'] = "C"
1102 self.workdir = args['workdir']
1103 self.mode = args.get('mode', "update")
1104 self.revision = args.get('revision')
1105 self.patch = args.get('patch')
1106 self.timeout = args.get('timeout', 120)
1107 self.retry = args.get('retry')
1108 # VC-specific subclasses should override this to extract more args.
1109 # Make sure to upcall!
1111 def start(self):
1112 self.sendStatus({'header': "starting " + self.header + "\n"})
1113 self.command = None
1115 # self.srcdir is where the VC system should put the sources
1116 if self.mode == "copy":
1117 self.srcdir = "source" # hardwired directory name, sorry
1118 else:
1119 self.srcdir = self.workdir
1120 self.sourcedatafile = os.path.join(self.builder.basedir,
1121 self.srcdir,
1122 ".buildbot-sourcedata")
1124 d = defer.succeed(None)
1125 # do we need to clobber anything?
1126 if self.mode in ("copy", "clobber", "export"):
1127 d.addCallback(self.doClobber, self.workdir)
1128 if not (self.sourcedirIsUpdateable() and self.sourcedataMatches()):
1129 # the directory cannot be updated, so we have to clobber it.
1130 # Perhaps the master just changed modes from 'export' to
1131 # 'update'.
1132 d.addCallback(self.doClobber, self.srcdir)
1134 d.addCallback(self.doVC)
1136 if self.mode == "copy":
1137 d.addCallback(self.doCopy)
1138 if self.patch:
1139 d.addCallback(self.doPatch)
1140 d.addCallbacks(self._sendRC, self._checkAbandoned)
1141 return d
1143 def interrupt(self):
1144 self.interrupted = True
1145 if self.command:
1146 self.command.kill("command interrupted")
1148 def doVC(self, res):
1149 if self.interrupted:
1150 raise AbandonChain(1)
1151 if self.sourcedirIsUpdateable() and self.sourcedataMatches():
1152 d = self.doVCUpdate()
1153 d.addCallback(self.maybeDoVCFallback)
1154 else:
1155 d = self.doVCFull()
1156 d.addBoth(self.maybeDoVCRetry)
1157 d.addCallback(self._abandonOnFailure)
1158 d.addCallback(self._handleGotRevision)
1159 d.addCallback(self.writeSourcedata)
1160 return d
1162 def sourcedataMatches(self):
1163 try:
1164 olddata = open(self.sourcedatafile, "r").read()
1165 if olddata != self.sourcedata:
1166 return False
1167 except IOError:
1168 return False
1169 return True
1171 def _handleGotRevision(self, res):
1172 d = defer.maybeDeferred(self.parseGotRevision)
1173 d.addCallback(lambda got_revision:
1174 self.sendStatus({'got_revision': got_revision}))
1175 return d
1177 def parseGotRevision(self):
1178 """Override this in a subclass. It should return a string that
1179 represents which revision was actually checked out, or a Deferred
1180 that will fire with such a string. If, in a future build, you were to
1181 pass this 'got_revision' string in as the 'revision' component of a
1182 SourceStamp, you should wind up with the same source code as this
1183 checkout just obtained.
1185 It is probably most useful to scan self.command.stdout for a string
1186 of some sort. Be sure to set keepStdout=True on the VC command that
1187 you run, so that you'll have something available to look at.
1189 If this information is unavailable, just return None."""
1191 return None
1193 def writeSourcedata(self, res):
1194 open(self.sourcedatafile, "w").write(self.sourcedata)
1195 return res
1197 def sourcedirIsUpdateable(self):
1198 raise NotImplementedError("this must be implemented in a subclass")
1200 def doVCUpdate(self):
1201 raise NotImplementedError("this must be implemented in a subclass")
1203 def doVCFull(self):
1204 raise NotImplementedError("this must be implemented in a subclass")
1206 def maybeDoVCFallback(self, rc):
1207 if type(rc) is int and rc == 0:
1208 return rc
1209 if self.interrupted:
1210 raise AbandonChain(1)
1211 msg = "update failed, clobbering and trying again"
1212 self.sendStatus({'header': msg + "\n"})
1213 log.msg(msg)
1214 d = self.doClobber(None, self.srcdir)
1215 d.addCallback(self.doVCFallback2)
1216 return d
1218 def doVCFallback2(self, res):
1219 msg = "now retrying VC operation"
1220 self.sendStatus({'header': msg + "\n"})
1221 log.msg(msg)
1222 d = self.doVCFull()
1223 d.addBoth(self.maybeDoVCRetry)
1224 d.addCallback(self._abandonOnFailure)
1225 return d
1227 def maybeDoVCRetry(self, res):
1228 """We get here somewhere after a VC chain has finished. res could
1229 be::
1231 - 0: the operation was successful
1232 - nonzero: the operation failed. retry if possible
1233 - AbandonChain: the operation failed, someone else noticed. retry.
1234 - Failure: some other exception, re-raise
1237 if isinstance(res, failure.Failure):
1238 if self.interrupted:
1239 return res # don't re-try interrupted builds
1240 res.trap(AbandonChain)
1241 else:
1242 if type(res) is int and res == 0:
1243 return res
1244 if self.interrupted:
1245 raise AbandonChain(1)
1246 # if we get here, we should retry, if possible
1247 if self.retry:
1248 delay, repeats = self.retry
1249 if repeats >= 0:
1250 self.retry = (delay, repeats-1)
1251 msg = ("update failed, trying %d more times after %d seconds"
1252 % (repeats, delay))
1253 self.sendStatus({'header': msg + "\n"})
1254 log.msg(msg)
1255 d = defer.Deferred()
1256 d.addCallback(lambda res: self.doVCFull())
1257 d.addBoth(self.maybeDoVCRetry)
1258 reactor.callLater(delay, d.callback, None)
1259 return d
1260 return res
1262 def doClobber(self, dummy, dirname):
1263 # TODO: remove the old tree in the background
1264 ## workdir = os.path.join(self.builder.basedir, self.workdir)
1265 ## deaddir = self.workdir + ".deleting"
1266 ## if os.path.isdir(workdir):
1267 ## try:
1268 ## os.rename(workdir, deaddir)
1269 ## # might fail if deaddir already exists: previous deletion
1270 ## # hasn't finished yet
1271 ## # start the deletion in the background
1272 ## # TODO: there was a solaris/NetApp/NFS problem where a
1273 ## # process that was still running out of the directory we're
1274 ## # trying to delete could prevent the rm-rf from working. I
1275 ## # think it stalled the rm, but maybe it just died with
1276 ## # permission issues. Try to detect this.
1277 ## os.commands("rm -rf %s &" % deaddir)
1278 ## except:
1279 ## # fall back to sequential delete-then-checkout
1280 ## pass
1281 d = os.path.join(self.builder.basedir, dirname)
1282 if runtime.platformType != "posix":
1283 # if we're running on w32, use rmtree instead. It will block,
1284 # but hopefully it won't take too long.
1285 rmdirRecursive(d)
1286 return defer.succeed(0)
1287 command = ["rm", "-rf", d]
1288 c = ShellCommand(self.builder, command, self.builder.basedir,
1289 sendRC=0, timeout=self.timeout)
1290 self.command = c
1291 # sendRC=0 means the rm command will send stdout/stderr to the
1292 # master, but not the rc=0 when it finishes. That job is left to
1293 # _sendRC
1294 d = c.start()
1295 d.addCallback(self._abandonOnFailure)
1296 return d
1298 def doCopy(self, res):
1299 # now copy tree to workdir
1300 fromdir = os.path.join(self.builder.basedir, self.srcdir)
1301 todir = os.path.join(self.builder.basedir, self.workdir)
1302 if runtime.platformType != "posix":
1303 shutil.copytree(fromdir, todir)
1304 return defer.succeed(0)
1305 command = ['cp', '-r', '-p', fromdir, todir]
1306 c = ShellCommand(self.builder, command, self.builder.basedir,
1307 sendRC=False, timeout=self.timeout)
1308 self.command = c
1309 d = c.start()
1310 d.addCallback(self._abandonOnFailure)
1311 return d
1313 def doPatch(self, res):
1314 patchlevel, diff = self.patch
1315 command = [getCommand("patch"), '-p%d' % patchlevel]
1316 dir = os.path.join(self.builder.basedir, self.workdir)
1317 # mark the directory so we don't try to update it later
1318 open(os.path.join(dir, ".buildbot-patched"), "w").write("patched\n")
1319 # now apply the patch
1320 c = ShellCommand(self.builder, command, dir,
1321 sendRC=False, timeout=self.timeout,
1322 initialStdin=diff)
1323 self.command = c
1324 d = c.start()
1325 d.addCallback(self._abandonOnFailure)
1326 return d
1329 class CVS(SourceBase):
1330 """CVS-specific VC operation. In addition to the arguments handled by
1331 SourceBase, this command reads the following keys:
1333 ['cvsroot'] (required): the CVSROOT repository string
1334 ['cvsmodule'] (required): the module to be retrieved
1335 ['branch']: a '-r' tag or branch name to use for the checkout/update
1336 ['login']: a string for use as a password to 'cvs login'
1337 ['global_options']: a list of strings to use before the CVS verb
1340 header = "cvs operation"
1342 def setup(self, args):
1343 SourceBase.setup(self, args)
1344 self.vcexe = getCommand("cvs")
1345 self.cvsroot = args['cvsroot']
1346 self.cvsmodule = args['cvsmodule']
1347 self.global_options = args.get('global_options', [])
1348 self.branch = args.get('branch')
1349 self.login = args.get('login')
1350 self.sourcedata = "%s\n%s\n%s\n" % (self.cvsroot, self.cvsmodule,
1351 self.branch)
1353 def sourcedirIsUpdateable(self):
1354 if os.path.exists(os.path.join(self.builder.basedir,
1355 self.srcdir, ".buildbot-patched")):
1356 return False
1357 return os.path.isdir(os.path.join(self.builder.basedir,
1358 self.srcdir, "CVS"))
1360 def start(self):
1361 if self.login is not None:
1362 # need to do a 'cvs login' command first
1363 d = self.builder.basedir
1364 command = ([self.vcexe, '-d', self.cvsroot] + self.global_options
1365 + ['login'])
1366 c = ShellCommand(self.builder, command, d,
1367 sendRC=False, timeout=self.timeout,
1368 initialStdin=self.login+"\n")
1369 self.command = c
1370 d = c.start()
1371 d.addCallback(self._abandonOnFailure)
1372 d.addCallback(self._didLogin)
1373 return d
1374 else:
1375 return self._didLogin(None)
1377 def _didLogin(self, res):
1378 # now we really start
1379 return SourceBase.start(self)
1381 def doVCUpdate(self):
1382 d = os.path.join(self.builder.basedir, self.srcdir)
1383 command = [self.vcexe, '-z3'] + self.global_options + ['update', '-dP']
1384 if self.branch:
1385 command += ['-r', self.branch]
1386 if self.revision:
1387 command += ['-D', self.revision]
1388 c = ShellCommand(self.builder, command, d,
1389 sendRC=False, timeout=self.timeout)
1390 self.command = c
1391 return c.start()
1393 def doVCFull(self):
1394 d = self.builder.basedir
1395 if self.mode == "export":
1396 verb = "export"
1397 else:
1398 verb = "checkout"
1399 command = ([self.vcexe, '-d', self.cvsroot, '-z3'] +
1400 self.global_options +
1401 [verb, '-d', self.srcdir])
1402 if self.branch:
1403 command += ['-r', self.branch]
1404 if self.revision:
1405 command += ['-D', self.revision]
1406 command += [self.cvsmodule]
1407 c = ShellCommand(self.builder, command, d,
1408 sendRC=False, timeout=self.timeout)
1409 self.command = c
1410 return c.start()
1412 def parseGotRevision(self):
1413 # CVS does not have any kind of revision stamp to speak of. We return
1414 # the current timestamp as a best-effort guess, but this depends upon
1415 # the local system having a clock that is
1416 # reasonably-well-synchronized with the repository.
1417 return time.strftime("%Y-%m-%d %H:%M:%S +0000", time.gmtime())
1419 registerSlaveCommand("cvs", CVS, command_version)
1421 class SVN(SourceBase):
1422 """Subversion-specific VC operation. In addition to the arguments
1423 handled by SourceBase, this command reads the following keys:
1425 ['svnurl'] (required): the SVN repository string
1428 header = "svn operation"
1430 def setup(self, args):
1431 SourceBase.setup(self, args)
1432 self.vcexe = getCommand("svn")
1433 self.svnurl = args['svnurl']
1434 self.sourcedata = "%s\n" % self.svnurl
1436 def sourcedirIsUpdateable(self):
1437 if os.path.exists(os.path.join(self.builder.basedir,
1438 self.srcdir, ".buildbot-patched")):
1439 return False
1440 return os.path.isdir(os.path.join(self.builder.basedir,
1441 self.srcdir, ".svn"))
1443 def doVCUpdate(self):
1444 revision = self.args['revision'] or 'HEAD'
1445 # update: possible for mode in ('copy', 'update')
1446 d = os.path.join(self.builder.basedir, self.srcdir)
1447 command = [self.vcexe, 'update', '--revision', str(revision),
1448 '--non-interactive']
1449 c = ShellCommand(self.builder, command, d,
1450 sendRC=False, timeout=self.timeout,
1451 keepStdout=True)
1452 self.command = c
1453 return c.start()
1455 def doVCFull(self):
1456 revision = self.args['revision'] or 'HEAD'
1457 d = self.builder.basedir
1458 if self.mode == "export":
1459 command = [self.vcexe, 'export', '--revision', str(revision),
1460 '--non-interactive',
1461 self.svnurl, self.srcdir]
1462 else:
1463 # mode=='clobber', or copy/update on a broken workspace
1464 command = [self.vcexe, 'checkout', '--revision', str(revision),
1465 '--non-interactive',
1466 self.svnurl, self.srcdir]
1467 c = ShellCommand(self.builder, command, d,
1468 sendRC=False, timeout=self.timeout,
1469 keepStdout=True)
1470 self.command = c
1471 return c.start()
1473 def parseGotRevision(self):
1474 # svn checkout operations finish with 'Checked out revision 16657.'
1475 # svn update operations finish the line 'At revision 16654.'
1476 # But we don't use those. Instead, run 'svnversion'.
1477 svnversion_command = getCommand("svnversion")
1478 # older versions of 'svnversion' (1.1.4) require the WC_PATH
1479 # argument, newer ones (1.3.1) do not.
1480 command = [svnversion_command, "."]
1481 c = ShellCommand(self.builder, command,
1482 os.path.join(self.builder.basedir, self.srcdir),
1483 environ=self.env,
1484 sendStdout=False, sendStderr=False, sendRC=False,
1485 keepStdout=True)
1486 c.usePTY = False
1487 d = c.start()
1488 def _parse(res):
1489 r = c.stdout.strip()
1490 got_version = None
1491 try:
1492 got_version = int(r)
1493 except ValueError:
1494 msg =("SVN.parseGotRevision unable to parse output "
1495 "of svnversion: '%s'" % r)
1496 log.msg(msg)
1497 self.sendStatus({'header': msg + "\n"})
1498 return got_version
1499 d.addCallback(_parse)
1500 return d
1503 registerSlaveCommand("svn", SVN, command_version)
1505 class Darcs(SourceBase):
1506 """Darcs-specific VC operation. In addition to the arguments
1507 handled by SourceBase, this command reads the following keys:
1509 ['repourl'] (required): the Darcs repository string
1512 header = "darcs operation"
1514 def setup(self, args):
1515 SourceBase.setup(self, args)
1516 self.vcexe = getCommand("darcs")
1517 self.repourl = args['repourl']
1518 self.sourcedata = "%s\n" % self.repourl
1519 self.revision = self.args.get('revision')
1521 def sourcedirIsUpdateable(self):
1522 if os.path.exists(os.path.join(self.builder.basedir,
1523 self.srcdir, ".buildbot-patched")):
1524 return False
1525 if self.revision:
1526 # checking out a specific revision requires a full 'darcs get'
1527 return False
1528 return os.path.isdir(os.path.join(self.builder.basedir,
1529 self.srcdir, "_darcs"))
1531 def doVCUpdate(self):
1532 assert not self.revision
1533 # update: possible for mode in ('copy', 'update')
1534 d = os.path.join(self.builder.basedir, self.srcdir)
1535 command = [self.vcexe, 'pull', '--all', '--verbose']
1536 c = ShellCommand(self.builder, command, d,
1537 sendRC=False, timeout=self.timeout)
1538 self.command = c
1539 return c.start()
1541 def doVCFull(self):
1542 # checkout or export
1543 d = self.builder.basedir
1544 command = [self.vcexe, 'get', '--verbose', '--partial',
1545 '--repo-name', self.srcdir]
1546 if self.revision:
1547 # write the context to a file
1548 n = os.path.join(self.builder.basedir, ".darcs-context")
1549 f = open(n, "wb")
1550 f.write(self.revision)
1551 f.close()
1552 # tell Darcs to use that context
1553 command.append('--context')
1554 command.append(n)
1555 command.append(self.repourl)
1557 c = ShellCommand(self.builder, command, d,
1558 sendRC=False, timeout=self.timeout)
1559 self.command = c
1560 d = c.start()
1561 if self.revision:
1562 d.addCallback(self.removeContextFile, n)
1563 return d
1565 def removeContextFile(self, res, n):
1566 os.unlink(n)
1567 return res
1569 def parseGotRevision(self):
1570 # we use 'darcs context' to find out what we wound up with
1571 command = [self.vcexe, "changes", "--context"]
1572 c = ShellCommand(self.builder, command,
1573 os.path.join(self.builder.basedir, self.srcdir),
1574 environ=self.env,
1575 sendStdout=False, sendStderr=False, sendRC=False,
1576 keepStdout=True)
1577 c.usePTY = False
1578 d = c.start()
1579 d.addCallback(lambda res: c.stdout)
1580 return d
1582 registerSlaveCommand("darcs", Darcs, command_version)
1584 class Monotone(SourceBase):
1585 """Monotone-specific VC operation. In addition to the arguments handled
1586 by SourceBase, this command reads the following keys:
1588 ['server_addr'] (required): the address of the server to pull from
1589 ['branch'] (required): the branch the revision is on
1590 ['db_path'] (required): the local database path to use
1591 ['revision'] (required): the revision to check out
1592 ['monotone']: (required): path to monotone executable
1595 header = "monotone operation"
1597 def setup(self, args):
1598 SourceBase.setup(self, args)
1599 self.server_addr = args["server_addr"]
1600 self.branch = args["branch"]
1601 self.db_path = args["db_path"]
1602 self.revision = args["revision"]
1603 self.monotone = args["monotone"]
1604 self._made_fulls = False
1605 self._pull_timeout = args["timeout"]
1607 def _makefulls(self):
1608 if not self._made_fulls:
1609 basedir = self.builder.basedir
1610 self.full_db_path = os.path.join(basedir, self.db_path)
1611 self.full_srcdir = os.path.join(basedir, self.srcdir)
1612 self._made_fulls = True
1614 def sourcedirIsUpdateable(self):
1615 self._makefulls()
1616 if os.path.exists(os.path.join(self.full_srcdir,
1617 ".buildbot_patched")):
1618 return False
1619 return (os.path.isfile(self.full_db_path)
1620 and os.path.isdir(os.path.join(self.full_srcdir, "MT")))
1622 def doVCUpdate(self):
1623 return self._withFreshDb(self._doUpdate)
1625 def _doUpdate(self):
1626 # update: possible for mode in ('copy', 'update')
1627 command = [self.monotone, "update",
1628 "-r", self.revision,
1629 "-b", self.branch]
1630 c = ShellCommand(self.builder, command, self.full_srcdir,
1631 sendRC=False, timeout=self.timeout)
1632 self.command = c
1633 return c.start()
1635 def doVCFull(self):
1636 return self._withFreshDb(self._doFull)
1638 def _doFull(self):
1639 command = [self.monotone, "--db=" + self.full_db_path,
1640 "checkout",
1641 "-r", self.revision,
1642 "-b", self.branch,
1643 self.full_srcdir]
1644 c = ShellCommand(self.builder, command, self.builder.basedir,
1645 sendRC=False, timeout=self.timeout)
1646 self.command = c
1647 return c.start()
1649 def _withFreshDb(self, callback):
1650 self._makefulls()
1651 # first ensure the db exists and is usable
1652 if os.path.isfile(self.full_db_path):
1653 # already exists, so run 'db migrate' in case monotone has been
1654 # upgraded under us
1655 command = [self.monotone, "db", "migrate",
1656 "--db=" + self.full_db_path]
1657 else:
1658 # We'll be doing an initial pull, so up the timeout to 3 hours to
1659 # make sure it will have time to complete.
1660 self._pull_timeout = max(self._pull_timeout, 3 * 60 * 60)
1661 self.sendStatus({"header": "creating database %s\n"
1662 % (self.full_db_path,)})
1663 command = [self.monotone, "db", "init",
1664 "--db=" + self.full_db_path]
1665 c = ShellCommand(self.builder, command, self.builder.basedir,
1666 sendRC=False, timeout=self.timeout)
1667 self.command = c
1668 d = c.start()
1669 d.addCallback(self._abandonOnFailure)
1670 d.addCallback(self._didDbInit)
1671 d.addCallback(self._didPull, callback)
1672 return d
1674 def _didDbInit(self, res):
1675 command = [self.monotone, "--db=" + self.full_db_path,
1676 "pull", "--ticker=dot", self.server_addr, self.branch]
1677 c = ShellCommand(self.builder, command, self.builder.basedir,
1678 sendRC=False, timeout=self._pull_timeout)
1679 self.sendStatus({"header": "pulling %s from %s\n"
1680 % (self.branch, self.server_addr)})
1681 self.command = c
1682 return c.start()
1684 def _didPull(self, res, callback):
1685 return callback()
1687 registerSlaveCommand("monotone", Monotone, command_version)
1690 class Git(SourceBase):
1691 """Git specific VC operation. In addition to the arguments
1692 handled by SourceBase, this command reads the following keys:
1694 ['repourl'] (required): the Cogito repository string
1697 header = "git operation"
1699 def setup(self, args):
1700 SourceBase.setup(self, args)
1701 self.repourl = args['repourl']
1702 #self.sourcedata = "" # TODO
1704 def sourcedirIsUpdateable(self):
1705 if os.path.exists(os.path.join(self.builder.basedir,
1706 self.srcdir, ".buildbot-patched")):
1707 return False
1708 return os.path.isdir(os.path.join(self.builder.basedir,
1709 self.srcdir, ".git"))
1711 def doVCUpdate(self):
1712 d = os.path.join(self.builder.basedir, self.srcdir)
1713 command = ['cg-update']
1714 c = ShellCommand(self.builder, command, d,
1715 sendRC=False, timeout=self.timeout)
1716 self.command = c
1717 return c.start()
1719 def doVCFull(self):
1720 d = os.path.join(self.builder.basedir, self.srcdir)
1721 os.mkdir(d)
1722 command = ['cg-clone', '-s', self.repourl]
1723 c = ShellCommand(self.builder, command, d,
1724 sendRC=False, timeout=self.timeout)
1725 self.command = c
1726 return c.start()
1728 registerSlaveCommand("git", Git, command_version)
1730 class Arch(SourceBase):
1731 """Arch-specific (tla-specific) VC operation. In addition to the
1732 arguments handled by SourceBase, this command reads the following keys:
1734 ['url'] (required): the repository string
1735 ['version'] (required): which version (i.e. branch) to retrieve
1736 ['revision'] (optional): the 'patch-NN' argument to check out
1737 ['archive']: the archive name to use. If None, use the archive's default
1738 ['build-config']: if present, give to 'tla build-config' after checkout
1741 header = "arch operation"
1742 buildconfig = None
1744 def setup(self, args):
1745 SourceBase.setup(self, args)
1746 self.vcexe = getCommand("tla")
1747 self.archive = args.get('archive')
1748 self.url = args['url']
1749 self.version = args['version']
1750 self.revision = args.get('revision')
1751 self.buildconfig = args.get('build-config')
1752 self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
1753 self.buildconfig)
1755 def sourcedirIsUpdateable(self):
1756 if self.revision:
1757 # Arch cannot roll a directory backwards, so if they ask for a
1758 # specific revision, clobber the directory. Technically this
1759 # could be limited to the cases where the requested revision is
1760 # later than our current one, but it's too hard to extract the
1761 # current revision from the tree.
1762 return False
1763 if os.path.exists(os.path.join(self.builder.basedir,
1764 self.srcdir, ".buildbot-patched")):
1765 return False
1766 return os.path.isdir(os.path.join(self.builder.basedir,
1767 self.srcdir, "{arch}"))
1769 def doVCUpdate(self):
1770 # update: possible for mode in ('copy', 'update')
1771 d = os.path.join(self.builder.basedir, self.srcdir)
1772 command = [self.vcexe, 'replay']
1773 if self.revision:
1774 command.append(self.revision)
1775 c = ShellCommand(self.builder, command, d,
1776 sendRC=False, timeout=self.timeout)
1777 self.command = c
1778 return c.start()
1780 def doVCFull(self):
1781 # to do a checkout, we must first "register" the archive by giving
1782 # the URL to tla, which will go to the repository at that URL and
1783 # figure out the archive name. tla will tell you the archive name
1784 # when it is done, and all further actions must refer to this name.
1786 command = [self.vcexe, 'register-archive', '--force', self.url]
1787 c = ShellCommand(self.builder, command, self.builder.basedir,
1788 sendRC=False, keepStdout=True,
1789 timeout=self.timeout)
1790 self.command = c
1791 d = c.start()
1792 d.addCallback(self._abandonOnFailure)
1793 d.addCallback(self._didRegister, c)
1794 return d
1796 def _didRegister(self, res, c):
1797 # find out what tla thinks the archive name is. If the user told us
1798 # to use something specific, make sure it matches.
1799 r = re.search(r'Registering archive: (\S+)\s*$', c.stdout)
1800 if r:
1801 msg = "tla reports archive name is '%s'" % r.group(1)
1802 log.msg(msg)
1803 self.builder.sendUpdate({'header': msg+"\n"})
1804 if self.archive and r.group(1) != self.archive:
1805 msg = (" mismatch, we wanted an archive named '%s'"
1806 % self.archive)
1807 log.msg(msg)
1808 self.builder.sendUpdate({'header': msg+"\n"})
1809 raise AbandonChain(-1)
1810 self.archive = r.group(1)
1811 assert self.archive, "need archive name to continue"
1812 return self._doGet()
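# For illustration: the regexp in _didRegister above expects tla's output to
# contain a line such as (archive name invented)
#
#     Registering archive: jrandom@example.com--2005
#
# and captures 'jrandom@example.com--2005' as the archive name, which is then
# compared against any name the buildmaster supplied.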
1814 def _doGet(self):
1815 ver = self.version
1816 if self.revision:
1817 ver += "--%s" % self.revision
1818 command = [self.vcexe, 'get', '--archive', self.archive,
1819 '--no-pristine',
1820 ver, self.srcdir]
1821 c = ShellCommand(self.builder, command, self.builder.basedir,
1822 sendRC=False, timeout=self.timeout)
1823 self.command = c
1824 d = c.start()
1825 d.addCallback(self._abandonOnFailure)
1826 if self.buildconfig:
1827 d.addCallback(self._didGet)
1828 return d
1830 def _didGet(self, res):
1831 d = os.path.join(self.builder.basedir, self.srcdir)
1832 command = [self.vcexe, 'build-config', self.buildconfig]
1833 c = ShellCommand(self.builder, command, d,
1834 sendRC=False, timeout=self.timeout)
1835 self.command = c
1836 d = c.start()
1837 d.addCallback(self._abandonOnFailure)
1838 return d
1840 def parseGotRevision(self):
1841 # using code from tryclient.TlaExtractor
1842 # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
1843 # 'tla logs' gives us REVISION
1844 command = [self.vcexe, "logs", "--full", "--reverse"]
1845 c = ShellCommand(self.builder, command,
1846 os.path.join(self.builder.basedir, self.srcdir),
1847 environ=self.env,
1848 sendStdout=False, sendStderr=False, sendRC=False,
1849 keepStdout=True)
1850 c.usePTY = False
1851 d = c.start()
1852 def _parse(res):
1853 tid = c.stdout.split("\n")[0].strip()
1854 slash = tid.index("/")
1855 dd = tid.rindex("--")
1856 #branch = tid[slash+1:dd]
1857 baserev = tid[dd+2:]
1858 return baserev
1859 d.addCallback(_parse)
1860 return d
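# A small illustration of the string handling in _parse above (the archive
# and version names are invented):
#
#     >>> tid = "jrandom@example.com--2005/hello--mainline--1.0--patch-3"
#     >>> tid[tid.rindex("--")+2:]
#     'patch-3'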
1862 registerSlaveCommand("arch", Arch, command_version)
1864 class Bazaar(Arch):
1865 """Bazaar (/usr/bin/baz) is an alternative client for Arch repositories.
1866 It is mostly option-compatible, but archive registration is different
1867 enough to warrant a separate Command.
1869 ['archive'] (required): the name of the archive being used
1870 """
1872 def setup(self, args):
1873 Arch.setup(self, args)
1874 self.vcexe = getCommand("baz")
1875 # baz doesn't emit the repository name after registration (and
1876 # grepping through the output of 'baz archives' is too hard), so we
1877 # require the buildmaster configuration to provide both the
1878 # archive name and the URL.
1879 self.archive = args['archive'] # required for Baz
1880 self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
1881 self.buildconfig)
1883 # in _didRegister, the regexp won't match, so we'll stick with the name
1884 # in self.archive
1886 def _doGet(self):
1887 # baz prefers ARCHIVE/VERSION. This will work even if
1888 # my-default-archive is not set.
1889 ver = self.archive + "/" + self.version
1890 if self.revision:
1891 ver += "--%s" % self.revision
1892 command = [self.vcexe, 'get', '--no-pristine',
1893 ver, self.srcdir]
1894 c = ShellCommand(self.builder, command, self.builder.basedir,
1895 sendRC=False, timeout=self.timeout)
1896 self.command = c
1897 d = c.start()
1898 d.addCallback(self._abandonOnFailure)
1899 if self.buildconfig:
1900 d.addCallback(self._didGet)
1901 return d
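# For illustration, with archive 'jrandom@example.com--2005', version
# 'hello--mainline--1.0' and revision 'patch-3' (all invented), the 'ver'
# string handed to 'baz get' above would be:
#
#     jrandom@example.com--2005/hello--mainline--1.0--patch-3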
1903 def parseGotRevision(self):
1904 # using code from tryclient.BazExtractor
1905 command = [self.vcexe, "tree-id"]
1906 c = ShellCommand(self.builder, command,
1907 os.path.join(self.builder.basedir, self.srcdir),
1908 environ=self.env,
1909 sendStdout=False, sendStderr=False, sendRC=False,
1910 keepStdout=True)
1911 c.usePTY = False
1912 d = c.start()
1913 def _parse(res):
1914 tid = c.stdout.strip()
1915 slash = tid.index("/")
1916 dd = tid.rindex("--")
1917 #branch = tid[slash+1:dd]
1918 baserev = tid[dd+2:]
1919 return baserev
1920 d.addCallback(_parse)
1921 return d
1923 registerSlaveCommand("bazaar", Bazaar, command_version)
1926 class Mercurial(SourceBase):
1927 """Mercurial specific VC operation. In addition to the arguments
1928 handled by SourceBase, this command reads the following keys:
1930 ['repourl'] (required): the Cogito repository string
1933 header = "mercurial operation"
1935 def setup(self, args):
1936 SourceBase.setup(self, args)
1937 self.vcexe = getCommand("hg")
1938 self.repourl = args['repourl']
1939 self.sourcedata = "%s\n" % self.repourl
1940 self.stdout = ""
1941 self.stderr = ""
1943 def sourcedirIsUpdateable(self):
1944 if os.path.exists(os.path.join(self.builder.basedir,
1945 self.srcdir, ".buildbot-patched")):
1946 return False
1947 # like Darcs, to check out a specific (old) revision, we have to do a
1948 # full checkout. TODO: I think 'hg pull' plus 'hg update' might work
1949 if self.revision:
1950 return False
1951 return os.path.isdir(os.path.join(self.builder.basedir,
1952 self.srcdir, ".hg"))
1954 def doVCUpdate(self):
1955 d = os.path.join(self.builder.basedir, self.srcdir)
1956 command = [self.vcexe, 'pull', '--update', '--verbose']
1957 if self.args['revision']:
1958 command.extend(['--rev', self.args['revision']])
1959 c = ShellCommand(self.builder, command, d,
1960 sendRC=False, timeout=self.timeout,
1961 keepStdout=True)
1962 self.command = c
1963 d = c.start()
1964 d.addCallback(self._handleEmptyUpdate)
1965 return d
1967 def _handleEmptyUpdate(self, res):
1968 if type(res) is int and res == 1:
1969 if self.command.stdout.find("no changes found") != -1:
1970 # 'hg pull', when it doesn't have anything to do, exits with
1971 # rc=1, and there appears to be no way to shut this off. It
1972 # emits a distinctive message to stdout, though. So catch
1973 # this and pretend that it completed successfully.
1974 return 0
1975 return res
1977 def doVCFull(self):
1978 d = os.path.join(self.builder.basedir, self.srcdir)
1979 command = [self.vcexe, 'clone']
1980 if self.args['revision']:
1981 command.extend(['--rev', self.args['revision']])
1982 command.extend([self.repourl, d])
1983 c = ShellCommand(self.builder, command, self.builder.basedir,
1984 sendRC=False, timeout=self.timeout)
1985 self.command = c
1986 return c.start()
1988 def parseGotRevision(self):
1989 # we use 'hg identify' to find out what we wound up with
1990 command = [self.vcexe, "identify"]
1991 c = ShellCommand(self.builder, command,
1992 os.path.join(self.builder.basedir, self.srcdir),
1993 environ=self.env,
1994 sendStdout=False, sendStderr=False, sendRC=False,
1995 keepStdout=True)
1996 d = c.start()
1997 def _parse(res):
1998 m = re.search(r'^(\w+)', c.stdout)
1999 return m.group(1)
2000 d.addCallback(_parse)
2001 return d
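# For reference, the hg invocations used by this class are roughly as follows
# (angle-bracketed names are placeholders):
#
#   hg pull --update --verbose [--rev <rev>]      (doVCUpdate, run in <srcdir>)
#   hg clone [--rev <rev>] <repourl> <srcdir>     (doVCFull, run in the basedir)
#   hg identify                                   (parseGotRevision)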
2003 registerSlaveCommand("hg", Mercurial, command_version)
2006 class P4(SourceBase):
2007 """A P4 source-updater.
2009 ['p4port'] (required): host:port for server to access
2010 ['p4user'] (optional): user to use for access
2011 ['p4passwd'] (optional): passwd to try for the user
2012 ['p4client'] (optional): client spec to use
2013 ['p4base'] (required): depot-side prefix of the client view
2014 ['p4extra_views'] (optional): list of (depot path, local suffix) pairs added to the view
2015 """
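# Illustrative only: a hypothetical args dictionary for this command (all
# values invented; 'mode' and 'branch' are also read by setup() below, and
# 'workdir' is handled by SourceBase):
#
#     {'p4port': 'perforce.example.com:1666',
#      'p4user': 'buildbot', 'p4passwd': 'secret',
#      'p4client': 'buildbot-slave1',
#      'p4base': '//depot/project/', 'p4extra_views': [],
#      'branch': 'trunk', 'mode': 'copy', 'workdir': 'build'}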
2016 header = "p4"
2018 def setup(self, args):
2019 SourceBase.setup(self, args)
2020 self.p4port = args['p4port']
2021 self.p4client = args['p4client']
2022 self.p4user = args['p4user']
2023 self.p4passwd = args['p4passwd']
2024 self.p4base = args['p4base']
2025 self.p4extra_views = args['p4extra_views']
2026 self.p4mode = args['mode']
2027 self.p4branch = args['branch']
2028 self.p4logname = os.environ['LOGNAME']
2030 self.sourcedata = str([
2031 # Perforce server.
2032 self.p4port,
2034 # Client spec.
2035 self.p4client,
2037 # Depot side of view spec.
2038 self.p4base,
2039 self.p4branch,
2040 self.p4extra_views,
2042 # Local side of view spec (srcdir is made from these).
2043 self.builder.basedir,
2044 self.mode,
2045 self.workdir
2046 ])
2049 def sourcedirIsUpdateable(self):
2050 if os.path.exists(os.path.join(self.builder.basedir,
2051 self.srcdir, ".buildbot-patched")):
2052 return False
2053 # We assume our client spec is still around.
2054 # We just say we aren't updateable if the dir doesn't exist so we
2055 # don't get ENOENT checking the sourcedata.
2056 return os.path.isdir(os.path.join(self.builder.basedir,
2057 self.srcdir))
2059 def doVCUpdate(self):
2060 return self._doP4Sync(force=False)
2062 def _doP4Sync(self, force):
2063 command = ['p4']
2065 if self.p4port:
2066 command.extend(['-p', self.p4port])
2067 if self.p4user:
2068 command.extend(['-u', self.p4user])
2069 if self.p4passwd:
2070 command.extend(['-P', self.p4passwd])
2071 if self.p4client:
2072 command.extend(['-c', self.p4client])
2073 command.extend(['sync'])
2074 if force:
2075 command.extend(['-f'])
2076 if self.revision:
2077 command.extend(['@' + str(self.revision)])
2078 env = {}
2079 c = ShellCommand(self.builder, command, self.builder.basedir,
2080 environ=env, sendRC=False, timeout=self.timeout,
2081 keepStdout=True)
2082 self.command = c
2083 d = c.start()
2084 d.addCallback(self._abandonOnFailure)
2085 return d
2088 def doVCFull(self):
2089 env = {}
2090 command = ['p4']
2091 client_spec = ''
2092 client_spec += "Client: %s\n\n" % self.p4client
2093 client_spec += "Owner: %s\n\n" % self.p4logname
2094 client_spec += "Description:\n\tCreated by %s\n\n" % self.p4logname
2095 client_spec += "Root:\t%s\n\n" % self.builder.basedir
2096 client_spec += "Options:\tallwrite rmdir\n\n"
2097 client_spec += "LineEnd:\tlocal\n\n"
2099 # Set up the client view
2100 client_spec += "View:\n\t%s" % (self.p4base)
2101 if self.p4branch:
2102 client_spec += "%s/" % (self.p4branch)
2103 client_spec += "... //%s/%s/...\n" % (self.p4client, self.srcdir)
2104 if self.p4extra_views:
2105 for k, v in self.p4extra_views:
2106 client_spec += "\t%s/... //%s/%s%s/...\n" % (k, self.p4client,
2107 self.srcdir, v)
2108 if self.p4port:
2109 command.extend(['-p', self.p4port])
2110 if self.p4user:
2111 command.extend(['-u', self.p4user])
2112 if self.p4passwd:
2113 command.extend(['-P', self.p4passwd])
2114 command.extend(['client', '-i'])
2115 log.msg(client_spec)
2116 c = ShellCommand(self.builder, command, self.builder.basedir,
2117 environ=env, sendRC=False, timeout=self.timeout,
2118 initialStdin=client_spec)
2119 self.command = c
2120 d = c.start()
2121 d.addCallback(self._abandonOnFailure)
2122 d.addCallback(lambda _: self._doP4Sync(force=True))
2123 return d
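# For illustration, with p4client='buildbot-slave1', p4base='//depot/project/',
# branch='trunk', a srcdir of 'build', a basedir of '/home/buildbot/slave/full'
# and LOGNAME=buildbot (all invented), the spec fed to 'p4 client -i' above
# would read (blank separator lines omitted):
#
#     Client: buildbot-slave1
#     Owner: buildbot
#     Description:
#             Created by buildbot
#     Root:   /home/buildbot/slave/full
#     Options:        allwrite rmdir
#     LineEnd:        local
#     View:
#             //depot/project/trunk/... //buildbot-slave1/build/...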
2125 registerSlaveCommand("p4", P4, command_version)
2128 class P4Sync(SourceBase):
2129 """A partial P4 source-updater. Requires manual setup of a per-slave P4
2130 environment. The only thing which comes from the master is P4PORT.
2131 'mode' is required to be 'copy'.
2133 ['p4port'] (required): host:port for server to access
2134 ['p4user'] (optional): user to use for access
2135 ['p4passwd'] (optional): passwd to try for the user
2136 ['p4client'] (optional): client spec to use
2137 """
2139 header = "p4 sync"
2141 def setup(self, args):
2142 SourceBase.setup(self, args)
2143 self.vcexe = getCommand("p4")
2144 self.p4port = args['p4port']
2145 self.p4user = args['p4user']
2146 self.p4passwd = args['p4passwd']
2147 self.p4client = args['p4client']
2149 def sourcedirIsUpdateable(self):
2150 return True
2152 def _doVC(self, force):
2153 d = os.path.join(self.builder.basedir, self.srcdir)
2154 command = [self.vcexe]
2155 if self.p4port:
2156 command.extend(['-p', self.p4port])
2157 if self.p4user:
2158 command.extend(['-u', self.p4user])
2159 if self.p4passwd:
2160 command.extend(['-P', self.p4passwd])
2161 if self.p4client:
2162 command.extend(['-c', self.p4client])
2163 command.extend(['sync'])
2164 if force:
2165 command.extend(['-f'])
2166 if self.revision:
2167 command.extend(['@' + self.revision])
2168 env = {}
2169 c = ShellCommand(self.builder, command, d, environ=env,
2170 sendRC=False, timeout=self.timeout)
2171 self.command = c
2172 return c.start()
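# For illustration, with every option set and force=True, the command built
# above is equivalent to running (values invented):
#
#     p4 -p perforce.example.com:1666 -u buildbot -P secret \
#         -c buildbot-slave1 sync -f @1234
#
# in the source directory.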
2174 def doVCUpdate(self):
2175 return self._doVC(force=False)
2177 def doVCFull(self):
2178 return self._doVC(force=True)
2180 registerSlaveCommand("p4sync", P4Sync, command_version)