#!/bin/sh

# NOTE: additional options can be passed to git repack by specifying
# them after the project name, for example:
#   gc.sh my-project -f

. @basedir@/shlib.sh

set -e

if [ $# -lt 1 ]; then
    echo "Usage: gc.sh projname [extra-repack-args]" >&2
    exit 1
fi

# packing options
packopts="--depth=50 --window=50 --window-memory=${var_window_memory:-1g}"
quiet=; [ -n "$show_progress" ] || quiet=-q

umask 002
[ "$cfg_permission_control" != "Hooks" ] || umask 000

pidactive() {
    if _result="$(kill -0 "$1" 2>&1)"; then
        # process exists and we have permission to signal it
        return 0
    fi
    case "$_result" in *"not permitted"*)
        # we do not have permission to signal the process
        return 0
    esac
    # process does not exist
    return 1
}

createlock() {
    # A .lock file should only exist for much less than a second.
    # If we see a stale lock file (> 1h old), remove it and then,
    # just in case, wait 30 seconds for any process whose .lock
    # we might have just removed (it's racy) to finish doing what
    # should take much less than a second to do.
    _stalelock="$(find "$1.lock" -maxdepth 1 -mmin +60 -print 2>/dev/null || :)"
    if [ -n "$_stalelock" ]; then
        rm -f "$_stalelock"
        sleep 30
    fi
    for _try in p p n; do
        if (set -C; > "$1.lock") 2>/dev/null; then
            echo "$1.lock"
            return 0
        fi
        # delay and try again
        [ "$_try" != "p" ] || sleep 1
    done
    # cannot create lock file
    return 1
}

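# Illustrative usage sketch (hypothetical file name): createlock prints the
# lock path on success and returns non-zero on failure; the (set -C; > file)
# subshell is the atomic noclobber create that makes it race-safe:
#   if _lk="$(createlock some.state)"; then
#       ...briefly mutate some.state...
#       rm -f "$_lk"
#   fi
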
# return true if there's more than one objects/pack-<sha>.pack file or
# ANY sha-1 files in objects
is_dirty() {
    _packs=$(find objects/pack -type f -name "pack-$octet20.pack" -print | head -n 2 | LC_ALL=C wc -l)
    if [ $_packs != 1 ] && [ $_packs != 0 ]; then
        return 0
    fi
    _objs=$(find objects/$octet -type f -name "$octet19" -print 2>/dev/null | head -n 1 | LC_ALL=C wc -l)
    [ $_objs -ne 0 ]
}

# make sure combine-packs uses the correct Git executable
run_combine_packs() {
    PATH="$var_git_exec_path:$cfg_basedir/bin:$PATH" @basedir@/jobd/combine-packs.sh "$@"
}

# combine the input pack(s) into a new pack (or possibly packs if packSizeLimit set)
# input pack names are read from standard input one per line delimited by the first
# ':', ' ' or '\n' character on the line (which allows gfi-packs to be read directly)
# all arguments, if any, are passed to pack-objects as additional options
# returns non-zero on failure AND creates .gc_failed in that case
combine_packs() {
    rm -f .gc_failed
    find objects/pack -maxdepth 1 -type f -name '*.zap*' -print0 | xargs -0 rm -f
    run_combine_packs --replace "$@" $packopts --all-progress-implied $quiet --non-empty || {
        >.gc_failed
        return 1
    }
    return 0
}

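# For illustration (hypothetical pack name): since only the text before the
# first ':' or ' ' on each input line matters, both of these lines name the
# same pack, which is why a gfi-packs file can be fed in unmodified:
#   objects/pack/pack-0123456789abcdef0123456789abcdef01234567.pack
#   objects/pack/pack-0123456789abcdef0123456789abcdef01234567.pack: extra notes
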
# if the current directory is_gfi_mirror then repack all packs listed in gfi-packs
repack_gfi_packs() {
    [ -f gfi-packs -a -s gfi-packs ] && is_gfi_mirror || return 0
    [ -d objects/pack ] || { rm -f gfi-packs; return 0; }
    combine_packs --ignore-missing --no-reuse-delta <gfi-packs
    rm -f gfi-packs
    return 0
}

# combine small packs into larger pack(s)
# we avoid any keep, bndl or bitmap packs
combine_small_packs() {
    _lpo="--exclude-no-idx --exclude-keep --exclude-bitmap --exclude-bndl --quiet"
    _lpo="$_lpo --object-limit $var_redelta_threshold objects/pack"
    while
        _cnt="$(list_packs --count $_lpo || :)"
        test "${_cnt:-0}" -ge 2
    do
        _newp="$(list_packs $_lpo | combine_packs --names --no-reuse-delta)"
        _newc="$(echo $(echo "$_newp" | LC_ALL=C wc -w))"
        # be paranoid and exit the loop if we haven't reduced the number of packs
        [ $_newc -lt $_cnt ] || break
    done
    return 0
}

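# Note on the loop above: POSIX sh allows a command list as the "while"
# condition, so the small-pack count is recomputed and retested before every
# pass; combining repeats until fewer than two candidate packs remain or a
# pass fails to shrink the count.
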
# Unfortunately git-svn lacks the ability to store newly fetched revisions as a pack.
# However, the fetch code conveniently sets .svnpack just before it runs git-svn fetch
# so that it's easy to find all the objects that have been fetched by git-svn and
# combine them into a pack.  The --no-reuse-delta option is meaningless here since
# everything to be packed is a loose object and therefore not a delta so deltification
# will always take place.
make_svn_pack() {
    [ -f .svnpack ] && is_svn_mirror || return 0
    rm -f .svnpackgc
    mv -f .svnpack .svnpackgc
    _newp="$(find objects/$octet -maxdepth 1 -type f -newer .svnpackgc -name "$octet19" -print 2>/dev/null |
        LC_ALL=C awk -F / '{print $2 $3}' |
        run_combine_packs --objects --names $packopts --incremental --all-progress-implied $quiet --non-empty)" || {
        mv -f .svnpackgc .svnpack
        >.gc_failed
        return 1
    }
    if [ -n "$_newp" ]; then
        # remove the now-redundant loose objects -- this is always safe
        # even during a concurrent push because a reprepare_packed_git
        # will be triggered if an object that should be there is not
        # found thereby finding it in the new pack instead
        git prune-packed $quiet
    fi
    rm -f .svnpackgc
}

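# For illustration: find prints loose object paths such as "objects/ab/cdef..."
# (38 more hex digits); the awk -F / above joins fields 2 and 3 back into the
# bare 40-character object name that the --objects mode of combine-packs reads
# from its standard input.
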
# HEADSHA="$(pack_is_complete /full/path/to/some.pack /full/path/to/packed-refs "$(cat HEAD)")"
pack_is_complete() {
    # Must have a matching .idx file and a non-empty packed-refs file
    [ -s "${1%.pack}.idx" ] || return 1
    [ -s "$2" ] || return 1
    _headsha=
    case "$3" in
        $octet20)
            _headsha="$3"
            ;;
        "ref: refs/"?*|"ref:refs/"?*|"refs/"?*)
            _headmatch="${3#ref:}"
            _headmatch="${_headmatch# }"
            _headmatchpat="$(echo "$_headmatch" | LC_ALL=C sed -e 's/\([.$]\)/\\\1/g')"
            _headsha="$(LC_ALL=C grep -e "^$octet20 $_headmatchpat\$" < "$2" | \
                LC_ALL=C cut -d ' ' -f 1)"
            case "$_headsha" in $octet20) :;; *)
                return 1
            esac
            ;;
        *)
            # bad HEAD
            return 1
            ;;
    esac
    # Build a throwaway repository skeleton that sees ONLY this pack and the
    # given packed-refs file, then count the commits reachable from its refs
    rm -rf pack_is_complete_test
    mkdir pack_is_complete_test
    mkdir pack_is_complete_test/refs
    mkdir pack_is_complete_test/objects
    mkdir pack_is_complete_test/objects/pack
    echo "$_headsha" > pack_is_complete_test/HEAD
    ln -s "$1" pack_is_complete_test/objects/pack/
    ln -s "${1%.pack}.idx" pack_is_complete_test/objects/pack/
    ln -s "$2" pack_is_complete_test/packed-refs
    _count="$(git --git-dir=pack_is_complete_test rev-list --count --all 2>/dev/null || :)"
    rm -rf pack_is_complete_test
    [ -n "$_count" ] || return 1
    [ "$_count" -gt 0 ] 2>/dev/null || return 1
    echo "$_headsha"
}

# On return a "$lockf" will have been created that must be removed when gc is done
lock_gc() {
    # be compatible with the gc.pid file from newer Git releases
    lockf=gc.pid
    hn="$(hostname)"
    active=
    if [ "$(createlock "$lockf")" ]; then
        # If $lockf is:
        #   1) less than 12 hours old
        #   2) contains two fields (pid hostname) NO trailing NL
        #   3) the hostname is different OR the pid is still alive
        # then we exit as another active process is holding the lock
        if [ "$(find "$lockf" -maxdepth 1 -mmin -720 -print 2>/dev/null)" ]; then
            apid=
            ahost=
            read -r apid ahost ajunk < "$lockf" || :
            if [ "$apid" ] && [ "$ahost" ]; then
                if [ "$ahost" != "$hn" ] || pidactive "$apid"; then
                    active=1
                fi
            fi
        fi
    else
        echo >&2 "[$proj] unable to create gc.pid.lock file"
        exit 1
    fi
    if [ -n "$active" ]; then
        rm -f "$lockf.lock"
        echo >&2 "[$proj] gc already running on machine '$ahost' pid '$apid'"
        exit 1
    fi
    printf "%s %s" "$$" "$hn" > "$lockf.lock"
    chmod 0664 "$lockf.lock"
    mv -f "$lockf.lock" "$lockf"
}

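# For illustration (hypothetical pid/host): after lock_gc succeeds, gc.pid
# contains exactly "12345 examplehost" with no trailing newline (printf above
# emits no \n), which keeps it readable by the gc.pid handling in newer Git
# releases per the compatibility comment above.
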
# Remove any crud that's been left behind by interrupted operations
# that did not clean up after themselves
remove_crud() {
    # Remove any existing FETCH_HEAD
    # There can only be a FETCH_HEAD if we've been fetching, not if we've been
    # receiving pushes (those never create a FETCH_HEAD).
    # And if we're fetching because we're a mirror, we know we're not fetching right
    # now since jobd.pl never runs a project's fetch simultaneously with its gc.
    # Therefore any existing FETCH_HEAD is junk.  And it may be many megabytes if
    # there were a lot of refs.
    rm -f FETCH_HEAD

    # Remove any stale pack remnants that are more than an hour old.
    # Stale pack fragments are defined as any pack-<sha1>.ext where .ext is NOT
    # .pack AND the corresponding .pack DOES NOT exist.  A bunch of stale
    # pack-<sha1>.idx files without their corresponding .pack files are worthless
    # and just waste space.  Normally there shouldn't be any remnants but actually
    # this can happen when things are interrupted at just the wrong time.
    # Note that the objects/pack directory is created by git init and should
    # always exist.
    find objects/pack -maxdepth 1 -type f -mmin +60 -name "pack-$octet20.?*" -print | \
        LC_ALL=C sed -e 's/^objects\/pack\/pack-//; s/\..*$//' | LC_ALL=C sort -u | \
    while read packsha; do
        [ ! -e "objects/pack/pack-$packsha.pack" ] || continue
        rm -f "objects/pack/pack-$packsha".?*
    done

    # Remove any stale pack .keep files that are more than 12 hours old.
    # We don't do anything to create any permanent pack .keep files, so they must
    # be remnants from some failed push or something.  Removing the .keep will
    # allow the pack to be properly repacked.
    find objects/pack -maxdepth 1 -type f -mmin +720 -name "pack-$octet20.keep" -print0 | xargs -0 rm -f

    # Remove any stale tmp_pack_*, tmp_idx_*, tmp_bitmap_*, packtmp-* or .tmp-*-pack files
    # that are more than 12 hours old.
    find objects/pack -maxdepth 1 -type f -mmin +720 \( \
        -name "tmp_pack_?*" -o -name "tmp_idx_?*" -o -name "tmp_bitmap_?*" -o \
        -name "packtmp-?*" -o -name ".tmp-?*-pack" \
    \) -print0 | xargs -0 rm -f

    # Remove any stale shallow_* files that are more than 12 hours old.
    # These can be left behind by Git >= 1.8.4.2 and < 2.0.0 when a client
    # requests a shallow clone.
    find . -maxdepth 1 -type f -mmin +720 -name "shallow_?*" -print0 | xargs -0 rm -f

    # Remove any stale *.temp files in the objects area that are more than 12 hours old.
    # This can be stale sha1.temp, or stale *.pack.temp so we kill all stale *.temp.
    find objects -type f -mmin +720 -name "*.temp" -print0 | xargs -0 rm -f

    # Remove any stale *.lock files in the htmlcache area that might have been left
    # behind after an abnormal exit during an attempt to update a cached file and
    # are more than 1 hour old.
    ! [ -d htmlcache ] || find htmlcache -type f -mmin +60 -name "*.lock" -print0 | xargs -0 rm -f

    # Remove any stale git-svn temp files that are more than 12 hours old.
    # The git-svn process creates temp files with random 10 character names
    # in the root of $GIT_DIR.  Unfortunately they do not have a recognizable
    # prefix, so we just have to kill any files with a 10-character name.  We
    # do this only for git-svn mirrors.  All characters are chosen from
    # [A-Za-z0-9_] so we can at least check that and fortunately the only
    # collision is 'FETCH_HEAD' but that shouldn't matter.
    # There may also be temp files with a Git_ prefix as well.
    if is_svn_mirror; then
        _randchar='[A-Za-z0-9_]'
        _randchar2="$_randchar$_randchar"
        _randchar4="$_randchar2$_randchar2"
        _randchar10="$_randchar4$_randchar4$_randchar2"
        find . -maxdepth 1 -type f -mmin +720 -name "$_randchar10" -print0 | xargs -0 rm -f
        find . -maxdepth 1 -type f -mmin +720 -name "Git_*" -print0 | xargs -0 rm -f
    fi

    # Remove any stale fast_import_crash_<pid> files that are more than 3 days old.
    if is_gfi_mirror; then
        find . -maxdepth 1 -type f -mmin +4320 -name "fast_import_crash_?*" -print0 | xargs -0 rm -f
    fi
}

## Garbage Collection Types
##
## There are two kinds of possible garbage collection (gc) operations:
##
##   1. A normal, full gc
##   2. A "mini" gc
##
## If the full garbage collection interval has expired (or gc has never been
## run), then a normal, full gc will take place.  Otherwise, a "mini" gc will
## take place if the file .needsgc exists.
##
## A "mini" gc is similar to "git gc --auto" in that it may not end up actually
## doing anything unless the right conditions are present so it's not a burden
## to run it often.  If the file .needsgc exists, a "mini" gc will occur at
## the next opportunity.
##
## Note, however, that the .nogc file suppresses ALL gc activity (normal or mini).

proj="${1%.git}"
shift
cd "$cfg_reporoot/$proj.git"

trap 'e=$?; rm -f .gc_in_progress; if [ $e != 0 ]; then echo "gc failed dir: $PWD" >&2; fi' EXIT
trap 'exit 130' INT
trap 'exit 143' TERM

# date -R is linux-only, POSIX equivalent is '+%a, %d %b %Y %T %z'
datefmt='+%a, %d %b %Y %T %z'
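# For example (illustrative value):
#   date "$datefmt"   ->   Mon, 02 Mar 2015 14:01:25 -0500
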
isminigc=
if check_interval lastgc $cfg_min_gc_interval; then
    if [ -e .needsgc ]; then
        isminigc=1
    else
        progress "= [$proj] garbage check skip (last at $(config_get lastgc))"
        exit 0
    fi
fi
if [ -e .nogc ]; then
    progress "x [$proj] garbage check disabled"
    exit 0
fi

if [ -n "$isminigc" ]; then
    # Perform a "mini" gc
    # Note that .delaygc is ignored here as that's only intended for full gc
    lock_gc
    rm -f .allowgc .needsgc
    remove_crud
    miniactive=
    if [ -f .svnpack ] && is_svn_mirror; then
        miniactive=1
        progress "+ [$proj] mini garbage check (`date`)"
        make_svn_pack
    fi
    # If there aren't at least 10 non-keep, non-bitmap, non-bndl packs then
    # don't actually process them yet
    lpo="--exclude-no-idx --exclude-keep --exclude-bitmap --exclude-bndl --quiet"
    packcnt="$(list_packs --count $lpo objects/pack || :)"
    if [ "${packcnt:-0}" -ge 10 ]; then
        if [ -z "$miniactive" ]; then
            miniactive=1
            progress "+ [$proj] mini garbage check (`date`)"
        fi
        if [ -f gfi-packs -a -s gfi-packs ] && is_gfi_mirror; then
            repack_gfi_packs
            packcnt="$(list_packs --count $lpo objects/pack || :)"
        fi
        # if repack_gfi_packs dropped the pack count to < 10 don't combine
        if [ "${packcnt:-0}" -ge 10 ]; then
            combine_small_packs
            packcnt="$(list_packs --count $lpo objects/pack || :)"
        fi
        # if we still have at least 10 packs trigger a full gc
        if [ "${packcnt:-0}" -ge 10 ]; then
            # We shouldn't be in a .delaygc state at this point, but if
            # we are then nuke it because we really need a full gc now
            rm -f .delaygc
            git config --unset gitweb.lastgc
            rm -f "$lockf"
            progress "- [$proj] mini garbage check triggering full gc too many packs (`date`)"
            exit 0
        fi
    fi
    rm -f "$lockf"
    if [ -n "$miniactive" ]; then
        progress "- [$proj] mini garbage check (`date`)"
    else
        progress "= [$proj] mini garbage check nothing but crud removal to do (`date`)"
    fi
    exit 0
fi

# Avoid unnecessary garbage collections:
#   1. If lastreceive is set and is older than lastgc
#      -AND-
#   2. We are not a fork (! -s alternates) -OR- lastparentgc is older than lastgc
#
# If lastgc is NOT set or lastreceive is NOT set we MUST run gc
# If we are a fork and lastparentgc is NOT set we MUST run gc
#
# If the repo is dirty after removing any crud we MUST run gc

gcstart="$(date "$datefmt")"
skipgc=
isfork=
[ -s objects/info/alternates ] && isfork=1
lastparentgcsecs=
[ -n "$isfork" ] && lastparentgcsecs="$(config_get_date_seconds lastparentgc || :)"
lastreceivesecs=
if lastreceivesecs="$(config_get_date_seconds lastreceive)" && \
    lastgcsecs="$(config_get_date_seconds lastgc)" && \
    [ $lastreceivesecs -lt $lastgcsecs ]; then
    # We've run gc since we last received, so maybe we can skip,
    # check if not fork or fork and lastparentgc < lastgc
    if [ -n "$isfork" ]; then
        if [ -n "$lastparentgcsecs" ] && \
            [ $lastparentgcsecs -lt $lastgcsecs ]; then
            # We've run gc since our parent ran gc so we can skip
            skipgc=1
        fi
    else
        # We don't have any alternates (we're not a fork) so we can skip
        skipgc=1
    fi
fi

# Prevent any other simultaneous gc operations
lock_gc

# At this point, if .allowgc exists, it's now crud to be removed
rm -f .allowgc

# Always get rid of crud
remove_crud

# Run 'git svn gc' now for svn mirrors
if is_svn_mirror; then
    git svn gc || :
fi

# Skip the actual gc if .delaygc is set
if [ -e .delaygc ]; then
    progress "x [$proj] garbage check delayed (except for crud removal)"
    rm -f "$lockf"
    exit 0
fi

# Do not skip gc if the repo is dirty
if [ -n "$skipgc" ] && ! is_dirty; then
    progress "= [$proj] garbage check nothing but crud removal to do (`date`)"
    config_set lastgc "$gcstart"
    rm -f "$lockf"
    exit 0
fi

bumptime=
if [ -n "$isfork" ] && [ -z "$lastparentgcsecs" ]; then
    # set lastparentgc and then update gcstart to be at least 1 second later
    config_set lastparentgc "$gcstart"
    bumptime=1
fi
if [ -z "$lastreceivesecs" ]; then
    # set lastreceive and then update gcstart to be at least 1 second later
    config_set lastreceive "$gcstart"
    bumptime=1
fi
if [ -n "$bumptime" ]; then
    sleep 1
    gcstart="$(date "$datefmt")"
fi

progress "+ [$proj] garbage check (`date`)"

newdeltas=
if [ -f gfi-packs -a -s gfi-packs ] && is_gfi_mirror; then
    if [ $(list_packs --exclude-no-idx --count objects/pack) -le \
        $(list_packs --exclude-no-idx --count --quiet --only gfi-packs) ]; then
        # Don't bother with repack_gfi_packs since everything's being repacked
        newdeltas=-f
    fi
fi
if [ -z "$newdeltas" ] && \
    [ $(list_packs --exclude-no-idx --count-objects objects/pack) -le $var_redelta_threshold ]; then
    # There aren't enough objects to worry about so just redelta to get the best pack
    newdeltas=-f
fi
if [ -z "$newdeltas" ] || has_forks "$proj"; then
    # Since we're not going to recompute deltas overall, we need to do the "mini"
    # maintenance and by doing it before we copy objects down to forks we reduce
    # the amount that gets sprayed into the forks' objects directories.
    # If we have forks we always need to do the "mini" maintenance, even if we are
    # recomputing all deltas, in order to avoid having suboptimal packs in the forks.
    make_svn_pack
    repack_gfi_packs
    combine_small_packs
fi

# safe pruning: we put all our objects in all forks, then we can
# safely get rid of extra ones; repacks in forks will get rid of
# the redundant ones again then; we carefully grab only loose
# objects and pack .idx and .pack files
forkdir="$proj"
if [ -d "../${forkdir##*/}" ]; then
    # It is enough to copy objects just one level down and get_repo_list
    # takes a regular expression (which is automatically prefixed with '^')
    # so we can easily match forks exactly one level down from this project
    get_repo_list "$forkdir/[^/]*:" |
    while read fork; do
        # Ignore forks that do not exist or are symbolic links
        [ ! -L "$cfg_reporoot/$fork.git" -a -d "$cfg_reporoot/$fork.git" ] || \
            continue
        # Or do not have a non-zero length alternates file
        [ -s "$cfg_reporoot/$fork.git/objects/info/alternates" ] || \
            continue
        # Match objects in parent project
        for d in objects/??; do
            [ "$d" != "objects/??" ] || continue
            mkdir -p "$cfg_reporoot/$fork.git/$d"
            ln -f "$d"/* "$cfg_reporoot/$fork.git/$d" || :
        done
        # Match packs in parent project
        mkdir -p "$cfg_reporoot/$fork.git/objects/pack"
        # (the glob comparison is true only if at least one pack-*.idx exists)
        if [ "$(echo objects/pack/pack-*.idx)" != \
            "objects/pack/pack-*.idx" ]; then
            ln -f objects/pack/pack-*.pack "$cfg_reporoot/$fork.git/objects/pack" || :
            ln -f objects/pack/pack-*.idx "$cfg_reporoot/$fork.git/objects/pack" || :
        fi
        # Update the fork's lastparentgc date (must be current, not $gcstart)
        GIT_DIR="$cfg_reporoot/$fork.git" git config \
            gitweb.lastparentgc "$(date "$datefmt")"
    done
fi

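# Note: "ln -f" above creates hard links, so each fork shares the parent's
# object and pack files (same inodes) rather than receiving copies; extra
# disk is only consumed later when a fork repacks or prunes independently.
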
git pack-refs --all
touch .gc_in_progress
rm -f .gc_failed bundles/*
rm -f objects/pack/pack-*.bndl
# We use the -A option with git repack so that unreachable objects can live
# on for a time as loose objects.  This is particularly helpful if we just
# happen to be in the process of sending out a ref update for a ref that was
# force updated and the old ref value would have otherwise been removed by
# repack because it was now unreachable.  Admittedly the window for gc to run
# and do that before we manage to send out the ref update is not large, but
# it would not be difficult to create such a situation.  Unfortunately, when
# Git unpacks these unreachable objects it will give them the modification
# time of the *.pack file they came out of.  This could be very, very old.
# If that happens, the subsequent git prune --expire some_time_ago will still
# remove the object(s) and our pending ref update will still lose out.
# To prevent this from happening and to get the behavior we want, we now
# touch the modification time of all pack-<sha>.pack files so that any
# loosened objects get a current time.  Git does not provide any other
# mechanism to do this.  We do not want to just touch all loose objects
# left after the repack because that would cause objects that were loosened
# previously to live on which we definitely do not want.
list_packs --exclude-no-idx objects/pack | xargs touch -c 2>/dev/null || :
# We wish to keep deltas from our last full pack so if we're not redeltaing
# then make sure the .pack associated with the .bitmap has a newer mod time
# (If there is no .bitmap then touch the pack with the most objects instead.)
if [ -z "$newdeltas" ]; then
    bmpack="$(list_packs --exclude-no-bitmap --exclude-no-idx --max-matches 1 objects/pack)"
    [ -n "$bmpack" ] || bmpack="$(list_packs --exclude-no-idx --max-matches 1 --object-limit -1 --include-boundary objects/pack)"
    if [ -n "$bmpack" ] && [ -f "$bmpack" -a -s "$bmpack" ]; then
        sleep 1
        touch -c "$bmpack" 2>/dev/null || :
    fi
fi
# The git repack command may issue a 'disabling bitmap' warning for some
# repositories.  This is perfectly normal and should be suppressed unless
# show_progress is set.  Unfortunately that means we have to grep -v the
# output.  And furthermore, since it's a translated message, we have to
# force the language to English to be sure we do it.
repackcmd="git repack $packopts -A -d -l $quiet $newdeltas $@"
[ -n "$show_progress" ] || \
    repackcmd="{ LC_ALL=C $repackcmd 2>&1 || touch .gc_failed; } | LC_ALL=C grep -v 'disabling bitmap' || :"
eval "$repackcmd"
[ ! -e .gc_failed ] || exit 1
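# For illustration: with show_progress unset and no extra args, the eval above
# ends up running something equivalent to
#   { LC_ALL=C git repack --depth=50 --window=50 --window-memory=1g \
#     -A -d -l -q 2>&1 || touch .gc_failed; } | \
#     LC_ALL=C grep -v 'disabling bitmap' || :
# (window-memory shown at its default; the real value comes from $var_window_memory)
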
# These, if they exist, are now meaningless and need to be removed
rm -f gfi-packs .needsgc .svnpack .svnpackgc
allpacks="$(echo objects/pack/pack-$octet20.pack)"
curhead="$(cat HEAD)"
pkrf=
[ ! -e packed-refs ] || pkrf=packed-refs
eval "reposizek=$(( $(echo 0 $(du -k $pkrf $allpacks 2>/dev/null | LC_ALL=C awk '{print $1}') | \
    LC_ALL=C sed -e 's/ / + /g') ))"
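# Worked example (hypothetical sizes): if du -k prints "120" and "4560", the
# echo/sed pipeline builds the string "0 + 120 + 4560" and the eval becomes
#   reposizek=$(( 0 + 120 + 4560 ))
# i.e. 4680 blocks of 1024 bytes.
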
# The -A option to `git repack` may have caused some loose objects to pop
# out of their packs.  We must make these objects group writable so that they
# can be freshened by other pushers.  Technically we need only do this for
# push projects but to enable mirror projects to be more easily converted to
# push projects, we go ahead and do it for all projects.
{ find objects/$octet -type f -name "$octet19" -print0 | xargs -0 chmod ug+w || :; } 2>/dev/null

# The git prune command does not take a -q or --quiet but started outputting
# 'Checking connectivity' progress messages in v1.7.9.  However, we can
# suppress those by piping through cat as it only activates the progress
# messages when stderr is a tty.  We only expire loose objects older than one
# day just in case there's some pending action (such as sending out a ref
# update) in progress that might want to examine them.  This may leave us with
# loose objects.  That's okay because at the next gc interval, we will always
# run gc if we see any loose objects regardless of whether or not we've seen
# any updates or we've received new linked objects from our parent.  Note that
# in order to keep loose objects that just recently became unreferenced but
# have a very old modification date around we rely on some help from both the
# update.sh and hooks/pre-receive scripts.  Furthermore, since Git v2.2.0
# (d3038d22 prune: keep objects reachable from recent objects) an unreachable
# object that would otherwise be pruned (because it's too old) will be kept
# alive by an unreachable object that refers to it that's not old enough to
# be pruned yet.
prunecmd='git prune --expire 1_day_ago'
[ -n "$show_progress" ] || \
    prunecmd="{ $prunecmd 2>&1 || touch .gc_failed; } | cat"
eval "$prunecmd"
[ ! -e .gc_failed ] || exit 1
git update-server-info

# darcs:// mirrors have a xxx.log file that will grow endlessly
# if this is a mirror and the file exists, shorten it to 10000 lines
# also take this opportunity to optimize the darcs repo
if [ ! -e .nofetch ] && [ -n "$cfg_mirror" ]; then
    url="$(config_get baseurl || :)"
    case "$url" in darcs://*)
        if [ -n "$cfg_mirror_darcs" ]; then
            url="${url%/}"
            basedarcs="$(basename "${url#darcs:/}")"
            if [ -f "$basedarcs.log" ]; then
                tail -n 10000 "$basedarcs.log" > "$basedarcs.log.$$"
                mv -f "$basedarcs.log.$$" "$basedarcs.log"
            fi
            if [ -d "$basedarcs.darcs" ]; then
                # use a subshell so the cd does not leak out
                (
                    cd "$basedarcs.darcs"
                    # Note that this does not optimize _darcs/inventories/ :(
                    darcs optimize
                )
            fi
        fi
    esac
fi

# Create a matching .bndl header file for the all-in-one pack we just created
# but only if we're not a fork (otherwise the bundle would not be complete)
# and we are running at least Git version 1.7.2 (pack_is_complete always fails otherwise)
if [ ! -s objects/info/alternates ] && [ -n "$var_have_git_172" ]; then
    # There should only be one pack in $allpacks but if there was a
    # simultaneous push...
    # The one we just created will have a .idx and will NOT have a .keep
    pkfound=
    pkhead=
    for pk in $allpacks; do
        [ -s "$pk" ] || continue
        pkbase="${pk%.pack}"
        [ -s "$pkbase.idx" ] || continue
        [ ! -e "$pkbase.keep" ] || continue
        if pkhead="$(pack_is_complete "$PWD/$pk" "$PWD/packed-refs" "$curhead")"; then
            pkfound="$pkbase"
            break
        fi
    done
    if [ -n "$pkfound" -a -n "$pkhead" ]; then
        {
            echo "# v2 git bundle"
            LC_ALL=C sed -ne "/^$octet20 refs\/[^ $tab]*\$/ p" < packed-refs
            echo "$pkhead HEAD"
            echo ""
        } > "$pkbase.bndl"
        bndletag="$("$cfg_basedir/bin/rangecgi" --etag -m 1 "$pkbase.bndl" "$pkbase.pack" || :)"
        bndlsha="$(printf '%s' "$bndletag" | git hash-object --stdin || :)"
        if [ -n "$bndletag" ]; then
            case "$bndlsha" in $octet20)
                bndlshatrailer="${bndlsha#????????}"
                bndlshaprefix="${bndlsha%$bndlshatrailer}"
                bndlname="$(TZ=UTC date +%Y%m%d_%H%M%S)-${bndlshaprefix:-0}"
                [ -d bundles ] || mkdir bundles
                echo "${pkbase#objects/pack/}.bndl" > "bundles/$bndlname"
                echo "${pkbase#objects/pack/}.pack" >> "bundles/$bndlname"
                ln -s -f -n "$bndlname" bundles/latest
            esac
        fi
    fi
fi

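# For illustration (hypothetical sha/date): the parameter trims above peel off
# everything after the first 8 hex digits of $bndlsha, yielding a bundles/
# entry named like "20150302_140125-1a2b3c4d" whose two lines give the .bndl
# and .pack file names, with bundles/latest symlinked at the newest entry.
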
# Record the size of this repo as the sum of its *.pack sizes as 1024-byte blocks
config_set_raw girocco.reposizek "${reposizek:-0}"

# We use $gcstart here to avoid a race where a push occurs during the gc itself
# and the next future gc could be incorrectly skipped if we used the current
# timestamp here instead
config_set lastgc "$gcstart"
rm -f "$lockf"

progress "- [$proj] garbage check (`date`)"