#!/usr/bin/perl

# jobd - perform Girocco maintenance jobs

# Run with --help for details

use strict;
use warnings;

use Getopt::Long;
use Pod::Usage;
use POSIX ":sys_wait_h";
use Cwd qw(realpath);

use lib "__BASEDIR__";
use Girocco::Config;
use Girocco::Project;
use Girocco::User;
use Girocco::Util;
BEGIN {noFatalsToBrowser}
use Girocco::ExecUtil;

# Options
my $quiet;
my $progress;
my $cpus = online_cpus;
my $kill_after = 900;
my $max_par = $cpus ? $cpus * 2 : 8;
my $max_par_intensive = 1;
my $load_triggers = $cpus ? sprintf("%g,%g", $cpus * 1.5, $cpus * 0.75) : "6,3";
my $lockfile = "/tmp/jobd-$Girocco::Config::tmpsuffix.lock";
my $restart_delay = 300;
my $all_once;
my $same_pid;
my @one = ();
my ($update_only, $gc_only, $needs_gc_only);
my ($load_trig, $load_untrig);

######### Jobs {{{1

sub update_project {
	my $job = shift;
	my $p = $job->{'project'};
	check_project_exists($job) || return;
	my $projpath = get_project_path($p);
	if ($gc_only || $needs_gc_only ||
	    -e "$projpath/.nofetch" ||
	    -e "$projpath/.bypass" ||
	    -e "$projpath/.bypass_fetch" ||
	    is_mirror_disabled($p)) {
		job_skip($job);
		return setup_gc($job);
	}
	if (-e "$projpath/.clone_in_progress" && ! -e "$projpath/.clone_failed") {
		job_skip($job, "initial mirroring not complete yet");
		return;
	}
	if (-e "$projpath/.clone_failed") {
		job_skip($job, "initial mirroring failed");
		# Still need to gc non top-level clones even if they've failed;
		# otherwise the objects copied into them from the parent will
		# just accumulate without bound.
		setup_gc($job) if $p =~ m,/,;
		return;
	}
	if (my $ts = is_operation_uptodate($p, 'lastrefresh', rand_adjust($Girocco::Config::min_mirror_interval))) {
		job_skip($job, "not needed right now, last run at $ts");
		setup_gc($job);
		return;
	}
	if (is_svn_clone($p)) {
		# git svn can be very, very slow at times
		$job->{'timeout_factor'} = 3;
	}
	exec_job_command($job, ["$Girocco::Config::basedir/jobd/update.sh", $p], $quiet);
}

sub gc_project {
	my $job = shift;
	my $p = $job->{'project'};
	check_project_exists($job) || return;
	my $projpath = get_project_path($p);
	if ($update_only || -e "$projpath/.nogc" || -e "$projpath/.bypass" ||
	    (-e "$projpath/.delaygc" && ! -e "$projpath/.allowgc" && ! -e "$projpath/.needsgc")) {
		job_skip($job);
		return;
	}
	my $ts = "";
	if (! -e "$projpath/.needsgc" && ($needs_gc_only ||
	    ($ts = is_operation_uptodate($p, 'lastgc', rand_adjust($Girocco::Config::min_gc_interval))))) {
		job_skip($job, ($needs_gc_only ? undef : "not needed right now, last run at $ts"));
		return;
	}
	# allow garbage collection to run for longer than an update
	$job->{'lastgc'} = get_git_config($projpath, "gitweb.lastgc");
	$job->{'timeout_factor'} = 2;
	exec_job_command($job, ["$Girocco::Config::basedir/jobd/gc.sh", $p], $quiet);
}

sub setup_gc {
	my $job = shift;
	queue_job(
		project => $job->{'project'},
		type => 'gc',
		command => \&gc_project,
		intensive => 1,
		on_success => \&maybe_setup_gc_again,
	);
}

sub maybe_setup_gc_again {
	my $job = shift;
	# If lastgc was set before gc.sh ran successfully and it's no longer
	# set now, then queue up another run of gc.sh for the project.
	# However, just in case, no matter what happens with the extra
	# gc.sh run, no more "bonus" runs are possible, to avoid any loops.
	# This allows a "mini" gc that triggers a full gc to have the
	# full gc run as part of the same --all-once run through instead
	# of waiting.  A very good thing for users of the --all-once option.
	if ($job->{'lastgc'}) {
		my $projpath = get_project_path($job->{'project'});
		get_git_config($projpath, "gitweb.lastgc") or
		queue_job(
			project => $job->{'project'},
			type => 'gc',
			command => \&gc_project,
			intensive => 1,
		);
	}
}

sub check_project_exists {
	my $job = shift;
	my $p = $job->{'project'};
	if (! -d get_project_path($p)) {
		job_skip($job, "non-existent project");
		return 0;
	}
	return 1;
}

sub get_project_path {
	"$Girocco::Config::reporoot/".shift().".git";
}

my $_last_config_path;
my $_last_config_id;
my $_last_config;
BEGIN {
	$_last_config_path = "";
	$_last_config_id = "";
	$_last_config = {};
}
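
# Single-entry cache for the most recently read project config file, keyed
# on device, inode, size and mtime so a changed file is re-read while
# repeated lookups of the same unchanged file stay cheap.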
sub get_git_config {
	my ($projdir, $name) = @_;
	defined($projdir) && -d $projdir && -f "$projdir/config" or return undef;
	my $cf = "$projdir/config";
	my @stat = stat($cf);
	@stat && $stat[7] && $stat[9] or return undef;
	my $id = join(":", $stat[0], $stat[1], $stat[7], $stat[9]); # dev,ino,size,mtime
	if ($_last_config_path ne $cf || $_last_config_id ne $id || ref($_last_config) ne 'HASH') {
		my $data = read_config_file_hash($cf);
		defined($data) or $data = {};
		$_last_config_path = $_last_config_id = "";
		$_last_config = $data;
		$_last_config_id = $id;
		$_last_config_path = $cf;
	}
	return $_last_config->{$name};
}

sub is_operation_uptodate {
	my ($project, $which, $threshold) = @_;
	my $path = get_project_path($project);
	my $timestamp = get_git_config($path, "gitweb.$which");
	defined($timestamp) or $timestamp = '';
	my $unix_ts = parse_rfc2822_date($timestamp) || 0;
	(time - $unix_ts) <= $threshold ? $timestamp : undef;
}

sub is_mirror_disabled {
	my ($project) = @_;
	my $path = get_project_path($project);
	my $baseurl = get_git_config($path, 'gitweb.baseurl');
	defined($baseurl) or $baseurl = '';
	$baseurl =~ s/^\s+//;
	$baseurl =~ s/\s+$//;
	return $baseurl eq "" || $baseurl =~ /\s/ || $baseurl =~ /^disabled(?:\s|$)/i;
}

sub is_svn_clone {
	my ($project) = @_;
	my $path = get_project_path($project);
	my $baseurl = get_git_config($path, 'gitweb.baseurl');
	defined($baseurl) or $baseurl = '';
	my $svnurl = get_git_config($path, 'svn-remote.svn.url');
	defined($svnurl) or $svnurl = '';
	return $baseurl =~ /^svn[:+]/i && $svnurl;
}

sub queue_one {
	my $project = shift;
	queue_job(
		project => $project,
		type => 'update',
		command => \&update_project,
		on_success => \&setup_gc,
		on_error => \&setup_gc,
	);
}

sub queue_all {
	queue_one($_) for (Girocco::Project->get_full_list());
}

######### Daemon operation {{{1

my @queue;
my @running;
my $perpetual = 1;
my $locked = 0;
my $jobs_executed;
my $jobs_skipped;
my @jobs_killed;
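
# Each entry in @queue and @running is a job hashref built by queue_job:
# 'project', 'type', 'command' and 'intensive' plus optional 'on_success'
# and 'on_error' callbacks, with bookkeeping fields ('queued_at', 'dont_run',
# 'pid', 'started_at', 'finished', 'killed', 'timeout_factor', 'lastgc')
# filled in as the job progresses.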

# Kills and reaps the specified pid.  Returns exit status ($?) on success,
# otherwise undef if the process could not be killed or reaped.
# First sends SIGINT and, if the process does not exit within 15 seconds, SIGKILL.
# We used to send SIGTERM instead of SIGINT, but by using SIGINT we can take
# advantage of "tee -i" in our update scripts and really anything we're killing
# should respond the same to either SIGINT or SIGTERM and exit gracefully.
# Usage: my $exitcode = kill_gently($pid, $kill_process_group = 0);
sub kill_gently {
	my $targ = shift;
	my $use_pg = shift || 0;
	# Note that the docs for Perl's kill state that a negative signal
	# number should be used to kill process groups and that while a
	# negative process id (and positive signal number) may also do that
	# on some platforms, that's not portable.
	my $pg = $use_pg ? -1 : 1;
	my $harsh = time() + 15; # SIGKILL after this delay
	my $count = kill(2*$pg, $targ); # SIGINT is 2
	my $reaped = waitpid($targ, WNOHANG);
	return undef if $reaped < 0;
	return $? if $reaped == $targ;
	while ($count && time() < $harsh) {
		select(undef, undef, undef, 0.2);
		$reaped = waitpid($targ, WNOHANG);
		return undef if $reaped < 0;
		return $? if $reaped == $targ;
	}
	$harsh = time() + 2;
	$count = kill(9*$pg, $targ); # SIGKILL is 9
	$reaped = waitpid($targ, WNOHANG);
	return undef if $reaped < 0;
	return $? if $reaped == $targ;
	# We should not need to wait to reap a SIGKILL, however, just in case
	# the system doesn't make a SIGKILL'd process immediately reapable
	# (perhaps under extremely heavy load) we accommodate a brief delay.
	while ($count && time() < $harsh) {
		select(undef, undef, undef, 0.2);
		$reaped = waitpid($targ, WNOHANG);
		return undef if $reaped < 0;
		return $? if $reaped == $targ;
	}
	return undef;
}

sub handle_softexit {
	error("Waiting for outstanding jobs to finish... ".
	      "^C again to exit immediately");
	@queue = ();
	$perpetual = 0;
	$SIG{'INT'} = \&handle_exit;
}

sub handle_exit {
	error("Killing outstanding jobs, please be patient...");
	$SIG{'TERM'} = 'IGNORE';
	for (@running) {
		kill_gently($_->{'pid'}, 1);
	}
	unlink $lockfile if ($locked);
	exit(0);
}

sub queue_job {
	my %opts = @_;
	$opts{'queued_at'} = time;
	$opts{'dont_run'} = 0;
	$opts{'intensive'} = 0 unless exists $opts{'intensive'};
	push @queue, \%opts;
}

sub run_job {
	my $job = shift;

	push @running, $job;
	$job->{'command'}->($job);
	if ($job->{'dont_run'}) {
		pop @running;
		$jobs_skipped++;
		return;
	}
}

sub _job_name {
	my $job = shift;
	"[".$job->{'type'}."::".$job->{'project'}."]";
}

# Only one exec_job_command call is allowed per job!
sub exec_job_command {
	my ($job, $command, $err_only) = @_;

	my $pid;
	$job->{'finished'} = 0;
	delete $job->{'pid'};
	if (!defined($pid = fork)) {
		error(_job_name($job) ." Can't fork job: $!");
		$job->{'finished'} = 1;
		return;
	}
	if (!$pid) {
		# "Prevent" races
		select(undef, undef, undef, 0.1);

		open STDIN, '<', '/dev/null' or do {
			error(_job_name($job) ." Can't read from /dev/null: $!");
			exit 71; # EX_OSERR
		};
		if ($err_only) {
			open STDOUT, '>', '/dev/null' or do {
				error(_job_name($job) ." Can't write to /dev/null: $!");
				exit 71; # EX_OSERR
			};
		}

		# New process group so we can keep track of all of its children
		if (!defined(POSIX::setpgid(0, 0))) {
			error(_job_name($job) ." Can't create process group: $!");
			exit 71; # EX_OSERR
		}

		exec @$command;
		# Stop perl from complaining
		exit 71; # EX_OSERR
	}
	$job->{'pid'} = $pid;
	$job->{'started_at'} = time;
}

sub job_skip {
	my ($job, $msg) = @_;
	$job->{'dont_run'} = 1;
	error(_job_name($job) ." Skipping job: $msg") unless $quiet || !$msg;
}

sub reap_hanging_jobs {
	for (@running) {
		my $factor = $_->{'timeout_factor'} || 1;
		if (defined($_->{'started_at'}) && (time - $_->{'started_at'}) > ($kill_after * $factor)) {
			$_->{'finished'} = 1;
			my $exitcode = kill_gently($_->{'pid'}, 1);
			delete $_->{'pid'};
			$_->{'killed'} = 1;
			error(_job_name($_) ." KILLED due to timeout" .
			      (($exitcode & 0x7f) == 9 ? " with SIGKILL": ""));
			push @jobs_killed, _job_name($_);
		}
	}
}
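
# A job that is already flagged 'finished' when it reaches reap_one_job was
# killed or exited non-zero, so its on_error callback runs; otherwise its
# on_success callback runs and the executed count is bumped.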
sub reap_one_job {
	my $job = shift;
	if (!$job->{'finished'}) {
		$job->{'on_success'}->($job) if defined($job->{'on_success'});
		$job->{'finished'} = 1;
		$jobs_executed++;
	} else {
		$job->{'on_error'}->($job) if defined($job->{'on_error'});
	}
}

sub reap_finished_jobs {
	my $pid;
	my $finished_any = 0;
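	# Jobs killed by reap_hanging_jobs no longer have a 'pid', so waitpid
	# below will never report them; reap them explicitly first.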
	foreach my $child (grep { !$_->{'pid'} && $_->{'killed'} } @running) {
		delete $child->{'killed'};
		reap_one_job($child);
		$finished_any = 1;
	}
	while (1) {
		$pid = waitpid(-1, WNOHANG);
		last if $pid <= 0;
		$finished_any = 1;

		my @child = grep { $_->{'pid'} && $_->{'pid'} == $pid } @running;
		if ($?) {
			# any non-zero exit status should trigger on_error
			$child[0]->{'finished'} = 1 if @child;
		}
		if (@child) {
			delete $child[0]->{'pid'};
			reap_one_job($child[0]);
		}
	}
	@running = grep { $_->{'finished'} == 0 } @running;
	$finished_any;
}

sub have_intensive_jobs {
	grep { $_->{'intensive'} == 1 } @running;
}

sub ts {
	"[". scalar(localtime) ."] ";
}

sub get_load_info {
	if ($^O eq "linux") {
		# Read /proc/loadavg on Linux
		open(LOADAV, '<', '/proc/loadavg') or return undef;
		my $loadinfo = <LOADAV>;
		close LOADAV;
		return (split(/\s/, $loadinfo, 4))[0..2];
	} else {
		# Read the output of uptime everywhere else (works on Linux too)
		open(LOADAV, '-|', 'uptime') or return undef;
		my $loadinfo = <LOADAV>;
		close LOADAV;
		$loadinfo =~ /load average[^0-9.]*([0-9.]+)[^0-9.]+([0-9.]+)[^0-9.]+([0-9.]+)/iso or return undef;
		return ($1, $2, $3);
	}
}

sub run_queue {
	my $last_progress = time;
	my $last_checkload = time - 5;
	my $current_load = $load_trig;
	my $overloaded = 0;
	my $load_info = '';
	$jobs_executed = 0;
	$jobs_skipped = 0;
	@jobs_killed = ();
	if ($progress) {
		my $s = @queue == 1 ? '' : 's';
		ferror("--- Processing %d queued job$s", scalar(@queue));
	}
	$SIG{'INT'} = \&handle_softexit;
	$SIG{'TERM'} = \&handle_exit;
	while (@queue || @running) {
		reap_hanging_jobs();
		my $proceed_immediately = reap_finished_jobs();
		# Check current system load
		if ($load_trig && (time - $last_checkload) >= 5 && defined((my @loadinfo = get_load_info())[0])) {
			my $current_load = $loadinfo[0];
			if ($current_load > $load_trig && !$overloaded) {
				$overloaded = 1;
				error("PAUSE: system load is at $current_load > $load_trig") if $progress;
			} elsif ($current_load < $load_untrig && $overloaded) {
				$overloaded = 0;
				error("RESUME: system load is at $current_load < $load_untrig") if $progress;
			}
			if ($overloaded) {
				$load_info = ', paused (load '. $current_load .')';
			} else {
				$load_info = ', load '. $current_load;
			}
			$last_checkload = time;
		}
		# Status output
		if ($progress && (time - $last_progress) >= 60) {
			ferror("STATUS: %d queued, %d running, %d finished, %d skipped, %d killed$load_info", scalar(@queue), scalar(@running), $jobs_executed, $jobs_skipped, scalar(@jobs_killed));
			if (@running) {
				my @run_status;
				for (@running) {
					push @run_status, _job_name($_)." ". (time - $_->{'started_at'}) ."s";
				}
				error("STATUS: currently running: ". join(', ', @run_status));
			}
			$last_progress = time;
		}
		# Back off if we're too busy
		if (@running >= $max_par || have_intensive_jobs() >= $max_par_intensive || !@queue || $overloaded) {
			sleep 1 unless $proceed_immediately;
			next;
		}
		# Run next
		run_job(shift(@queue)) if @queue;
	}
	if ($progress) {
		my $s = $jobs_executed == 1 ? '' : 's';
		ferror("--- Queue processed. %d job$s executed, %d skipped, %d killed.", $jobs_executed, $jobs_skipped, scalar(@jobs_killed));
	}
}

sub run_perpetually {
	if (-e $lockfile) {
		die "Lockfile '$lockfile' exists. Please make sure no other instance of jobd is running.\n";
	}
	open LOCK, '>', $lockfile or die "Cannot create lockfile '$lockfile': $!\n";
	print LOCK $$;
	close LOCK;
	$locked = 1;

	my $result = "";
	while ($perpetual) {
		# touch ctime of lockfile to prevent it from being removed by /tmp cleaning
		chmod 0640, $lockfile;
		chmod 0644, $lockfile;
		# check for restart request
		open LOCK, '<', $lockfile or die "Lock file '$lockfile' has disappeared!\n";
		my $request = <LOCK>;
		close LOCK;
		chomp $request if defined($request);
		if (defined($request) && $request eq "restart") {
			$result = $request;
			last;
		}
		queue_all();
		run_queue();
		sleep($restart_delay) if $perpetual; # Let the system breathe for a moment
	}
	unlink $lockfile;
	$locked = 0;
	return $result;
}

######### Helpers {{{1

sub error($) {
	print STDERR ts().shift()."\n";
}

sub ferror(@) {
	error(sprintf($_[0], @_[1..$#_]));
}

sub fatal($) {
	error(shift);
	exit 1;
}

######### Main {{{1

my $reexec = Girocco::ExecUtil->new;
my $realpath0 = realpath($0);
chdir "/";
close(DATA) if fileno(DATA);

# Parse options
Getopt::Long::Configure('bundling');
my %one_once;
my $parse_res = GetOptions(
	'help|?|h' => sub {
		pod2usage(-verbose => 2, -exitval => 0, -input => $realpath0)},
	'quiet|q' => \$quiet,
	'progress|P' => \$progress,
	'kill-after|k=i' => \$kill_after,
	'max-parallel|p=i' => \$max_par,
	'max-intensive-parallel|i=i' => \$max_par_intensive,
	'load-triggers=s' => \$load_triggers,
	'restart-delay|d=i' => \$restart_delay,
	'lockfile|l=s' => \$lockfile,
	'same-pid' => \$same_pid,
	'all-once|a' => \$all_once,
	'one|o=s' => sub {$one_once{$_[1]} = 1, push(@one, $_[1])
		unless exists $one_once{$_[1]}},
	'update-only' => \$update_only,
	'gc-only' => \$gc_only,
	'needs-gc-only' => \$needs_gc_only,
) || pod2usage(-exitval => 2, -input => $realpath0);
fatal("Error: can only use one out of --all-once and --one")
	if $all_once && @one;
my $onlycnt = ($update_only?1:0) + ($gc_only?1:0) + ($needs_gc_only?1:0);
fatal("Error: can only use one out of --update-only, --gc-only and --needs-gc-only")
	if $onlycnt > 1;
fatal("Error: --update-only, --gc-only or --needs-gc-only requires --all-once or --one")
	if $onlycnt && !($all_once || @one);

unless ($quiet) {
	$ENV{'show_progress'} = '1';
	$progress = 1;
}

$load_triggers = '0,0' unless defined((get_load_info())[0]);
($load_trig, $load_untrig) = split(/,/, $load_triggers);

if (@one) {
	queue_one($_) foreach @one;
	run_queue();
	exit;
}

if ($all_once) {
	queue_all();
	run_queue();
	exit;
}

# Bare block so that `redo` below can rerun run_perpetually after a failed
# re-exec attempt.
{
	if (run_perpetually() eq "restart") {
		error("Restarting in response to restart request... ");
		$reexec->reexec($same_pid);
		error("Continuing after failed restart: $!");
		chdir "/";
		redo;
	}
}

########## Documentation {{{1

__END__

=head1 NAME

jobd.pl - Perform Girocco maintenance jobs

=head1 SYNOPSIS

jobd.pl [options]

 Options:
   -h | --help                           detailed instructions
   -q | --quiet                          run quietly
   -P | --progress                       show occasional status updates
   -k SECONDS | --kill-after SECONDS     how long to wait before killing jobs
   -p NUM | --max-parallel NUM           how many jobs to run at the same time
   -i NUM | --max-intensive-parallel NUM how many resource-hungry jobs to run
                                         at the same time
   --load-triggers TRIG,UNTRIG           stop queueing jobs at load above
                                         TRIG and resume at load below UNTRIG
   -d NUM | --restart-delay NUM          wait for this many seconds between
                                         queue runs
   -l FILE | --lockfile FILE             create a lockfile in the given
                                         location
   --same-pid                            keep same pid during graceful restart
   -a | --all-once                       process the list only once
   -o PRJNAME | --one PRJNAME            process only one project
   --update-only                         process mirror updates only
   --gc-only                             perform needed garbage collection only
   --needs-gc-only                       perform needed mini gc only
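
For illustration only (C<groupname/projname> stands in for a real project
name), typical invocations might look like:

   jobd.pl                                  run perpetually with the defaults
   jobd.pl --all-once --progress            process every project once, then exit
   jobd.pl --one groupname/projname --update-only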

=head1 OPTIONS

=over 8

=item B<--help>

Print the full description of jobd.pl's options.

=item B<--quiet>

Suppress non-error messages, e.g. for use when running this task as a cronjob.

=item B<--progress>

Show information about the current status of the job queue occasionally. This
is automatically enabled if --quiet is not given.

=item B<--kill-after SECONDS>

Kill supervised jobs after a certain time to avoid hanging the daemon.

=item B<--max-parallel NUM>

Run no more than that many jobs at the same time. The default is the number
of cpus * 2. If the number of cpus cannot be determined, the default is 8.

=item B<--max-intensive-parallel NUM>

Run no more than that many resource-hungry jobs at the same time. Right now,
this refers to repacking jobs. The default is 1.

=item B<--load-triggers TRIG,UNTRIG>

If the first system load average (1 minute average) exceeds TRIG, don't queue
any more jobs until it goes below UNTRIG. This is currently only supported on
Linux and any other platforms that provide an uptime command with load average
output.

If both values are zero, load checks are disabled. The default is the number
of cpus * 1.5 for TRIG and half that for UNTRIG. If the number of cpus cannot
be determined, the default is 6,3.
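
For example, with 4 cpus the defaults work out to C<--load-triggers 6,3>:
queueing pauses once the 1-minute load average rises above 6 and resumes
after it drops back below 3.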

=item B<--restart-delay NUM>

After processing the queue, wait this many seconds until the queue is
restarted. The default is 300 seconds.

=item B<--lockfile FILE>

For perpetual operation, specify the full path to a lock file to create and
then remove after finishing/aborting. The default is /tmp/jobd-$suffix.lock
where $suffix is a 6-character string uniquely determined by the name and
nickname of this Girocco instance. The pid of the running jobd instance will
be written to the lock file.
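
As a sketch of how the lock file doubles as a simple control channel (the
suffix shown is illustrative): replacing its contents with the single word
C<restart> asks a perpetually running jobd.pl to perform a graceful restart
on its next pass through the queue.

   echo restart > /tmp/jobd-abc123.lock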

=item B<--same-pid>

When performing a graceful restart, keep the same pid rather than switching to
a new one.

=item B<--all-once>

Instead of perpetually processing all projects over and over again, process
them just once and then exit.
Conflicts with the B<--one PRJNAME> option.

=item B<--one PRJNAME>

Process only the given project (given as just the project name without C<.git>
suffix) and then exit. May be repeated to process more than one project.
Conflicts with the B<--all-once> option.

=item B<--update-only>

Limit processing to only those projects that need a mirror update.
Behaves as though every project has a C<.nogc> file present in it.
Requires use of the B<--all-once> or B<--one PRJNAME> option.
Conflicts with the B<--gc-only> and B<--needs-gc-only> options.

=item B<--gc-only>

Limit processing to only those projects that need to have garbage collection
run on them. Behaves as though every project has a C<.bypass_fetch> file
present in it. Requires use of the B<--all-once> or B<--one PRJNAME> option.
Conflicts with the B<--update-only> and B<--needs-gc-only> options.

=item B<--needs-gc-only>

Limit processing to only those projects that need to have mini garbage
collection run on them. Behaves as though every project with a C<.needsgc>
file present in it also has a C<.bypass_fetch> file present in it and as though
every project without a C<.needsgc> file present in it has a C<.bypass> file
present in it. Requires use of the B<--all-once> or B<--one PRJNAME> option.
Conflicts with the B<--update-only> and B<--gc-only> options.

=back

=head1 DESCRIPTION

jobd.pl is Girocco's repository maintenance servant; it periodically checks
all the repositories, updating mirrored repositories and repacking push-mode
repositories as needed.

=cut