#!/bin/sh

# NOTE: additional options can be passed to git repack by specifying
# them after the project name, for example:
# gc.sh my-project -f

. @basedir@/shlib.sh

set -e

if [ $# -lt 1 ]; then
	echo "Usage: gc.sh projname [extra-repack-args]" >&2
	exit 1
fi

# packing options
packopts="--depth=50 --window=50 --window-memory=${var_window_memory:-1g}"
quiet=; [ -n "$show_progress" ] || quiet=-q

umask 002
[ "$cfg_permission_control" != "Hooks" ] || umask 000
pidactive() {
	if _result="$(kill -0 "$1" 2>&1)"; then
		# process exists and we have permission to signal it
		return 0
	fi
	case "$_result" in *"not permitted"*)
		# we do not have permission to signal the process
		return 0
	esac
	# process does not exist
	return 1
}
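
# Usage sketch (hypothetical pid): succeeds while the process exists (or we
# merely lack permission to signal it), fails once it is truly gone:
#   pidactive 12345 || echo "process 12345 has exited"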

createlock() {
	# A .lock file should only exist for much less than a second.
	# If we see a stale lock file (> 1h old), remove it and then,
	# just in case, wait 30 seconds for any process whose .lock
	# we might have just removed (it's racy) to finish doing what
	# should take much less than a second to do.
	_stalelock="$(find "$1.lock" -maxdepth 1 -mmin +60 -print 2>/dev/null || :)"
	if [ -n "$_stalelock" ]; then
		rm -f "$_stalelock"
		sleep 30
	fi
	for _try in p p n; do
		if (set -C; > "$1.lock") 2>/dev/null; then
			echo "$1.lock"
			return 0
		fi
		# delay and try again
		[ "$_try" != "p" ] || sleep 1
	done
	# cannot create lock file
	return 1
}
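
# On success createlock echoes the lock file's name, so a caller can do
# something like this (hypothetical usage):
#   if lockfile="$(createlock some.file)"; then ...; rm -f "$lockfile"; fi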

# return true if there's more than one objects/pack-<sha>.pack file or
# ANY sha-1 files in objects
is_dirty() {
	_packs=$(find objects/pack -type f -name "pack-$octet20.pack" -print | head -n 2 | LC_ALL=C wc -l)
	if [ $_packs != 1 ] && [ $_packs != 0 ]; then
		return 0
	fi
	_objs=$(find objects/$octet -type f -name "$octet19" -print 2>/dev/null | head -n 1 | LC_ALL=C wc -l)
	[ $_objs -ne 0 ]
}

# make sure combine-packs uses the correct Git executable
run_combine_packs() {
	PATH="$var_git_exec_path:$cfg_basedir/bin:$PATH" @basedir@/jobd/combine-packs.sh "$@"
}

# combine the input pack(s) into a new pack (or possibly packs if packSizeLimit set)
# input pack names are read from standard input one per line delimited by the first
# ':', ' ' or '\n' character on the line (which allows gfi-packs to be read directly)
# all arguments, if any, are passed to pack-objects as additional options
# returns non-zero on failure AND creates .gc_failed in that case
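# For example, either of these input lines names the same pack (hypothetical
# pack name; anything after the first delimiter is ignored):
#   pack-0123456789abcdef0123456789abcdef01234567.pack
#   pack-0123456789abcdef0123456789abcdef01234567.pack: 42 objects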
combine_packs() {
	rm -f .gc_failed
	find objects/pack -maxdepth 1 -type f -name '*.zap*' -print0 | xargs -0 rm -f
	run_combine_packs --replace "$@" $packopts --all-progress-implied $quiet --non-empty || {
		>.gc_failed
		return 1
	}
	return 0
}

# if the current directory is_gfi_mirror then repack all packs listed in gfi-packs
repack_gfi_packs() {
	[ -f gfi-packs -a -s gfi-packs ] && is_gfi_mirror || return 0
	[ -d objects/pack ] || { rm -f gfi-packs; return 0; }
	combine_packs --ignore-missing --no-reuse-delta <gfi-packs
	rm -f gfi-packs
	return 0
}

# combine small packs into larger pack(s)
# we avoid any keep, bndl or bitmap packs
# if the optional argument is non-empty even a single small pack will be redeltaed
combine_small_packs() {
	_minsmallpacks=2
	if [ -n "$1" ] && [ -n "$noreusedeltaopt" ]; then
		_minsmallpacks=1
	fi
	_lpo="--exclude-no-idx --exclude-keep --exclude-bitmap --exclude-bndl --quiet"
	_lpo="$_lpo --object-limit $var_redelta_threshold objects/pack"
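	# Everything between "while" and "do" below is the loop condition:
	# recount and keep combining as long as at least $_minsmallpacks
	# small packs remain.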
	while
		_cnt="$(list_packs --count $_lpo || :)"
		test "${_cnt:-0}" -ge $_minsmallpacks
	do
		_newp="$(list_packs $_lpo | combine_packs --names $noreusedeltaopt)"
		_newc="$(echo $(echo "$_newp" | LC_ALL=C wc -w))"
		# be paranoid and exit the loop if we haven't reduced the number of packs
		[ $_newc -lt $_cnt ] || break
		_minsmallpacks=2
	done
	return 0
}

# Unfortunately git-svn lacks the ability to store newly fetched revisions as a pack.
# However, the fetch code conveniently sets .svnpack just before it runs git-svn fetch
# so that it's easy to find all the objects that have been fetched by git-svn and
# combine them into a pack. The --no-reuse-delta option is meaningless here since
# everything to be packed is a loose object and therefore not a delta so deltification
# will always take place.
make_svn_pack() {
	[ -f .svnpack ] && is_svn_mirror || return 0
	rm -f .svnpackgc
	mv -f .svnpack .svnpackgc
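	# The awk expression below rejoins a loose object path's fan-out
	# directory and file name (objects/xx/yyyy...) into the full 40-hex
	# object name (xxyyyy...) expected on combine-packs --objects input.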
	_newp="$(find objects/$octet -maxdepth 1 -type f -newer .svnpackgc -name "$octet19" -print 2>/dev/null |
		LC_ALL=C awk -F / '{print $2 $3}' |
		run_combine_packs --objects --names $packopts --incremental --all-progress-implied $quiet --non-empty)" || {
		mv -f .svnpackgc .svnpack
		>.gc_failed
		return 1
	}
	if [ -n "$_newp" ]; then
		# remove the now-redundant loose objects -- this is always safe
		# even during a concurrent push because a reprepare_packed_git
		# will be triggered if an object that should be there is not
		# found thereby finding it in the new pack instead
		git prune-packed $quiet
	fi
	rm -f .svnpackgc
}

# HEADSHA="$(pack_is_complete /full/path/to/some.pack /full/path/to/packed-refs "$(cat HEAD)")"
pack_is_complete() {
	# Must have a matching .idx file and a non-empty packed-refs file
	[ -s "${1%.pack}.idx" ] || return 1
	[ -s "$2" ] || return 1
	_headsha=
	case "$3" in
		$octet20)
			_headsha="$3"
			;;
		"ref: refs/"?*|"ref:refs/"?*|"refs/"?*)
			_headmatch="${3#ref:}"
			_headmatch="${_headmatch# }"
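			# escape '.' and '$' so grep matches the ref name literally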
			_headmatchpat="$(echo "$_headmatch" | LC_ALL=C sed -e 's/\([.$]\)/\\\1/g')"
			_headsha="$(LC_ALL=C grep -e "^$octet20 $_headmatchpat\$" < "$2" | \
				LC_ALL=C cut -d ' ' -f 1)"
			case "$_headsha" in $octet20) :;; *)
				return 1
			esac
			;;
		*)
			# bad HEAD
			return 1
	esac
	rm -rf pack_is_complete_test
	mkdir pack_is_complete_test
	mkdir pack_is_complete_test/refs
	mkdir pack_is_complete_test/objects
	mkdir pack_is_complete_test/objects/pack
	echo "$_headsha" > pack_is_complete_test/HEAD
	ln -s "$1" pack_is_complete_test/objects/pack/
	ln -s "${1%.pack}.idx" pack_is_complete_test/objects/pack/
	ln -s "$2" pack_is_complete_test/packed-refs
	_count="$(git --git-dir=pack_is_complete_test rev-list --count --all 2>/dev/null || :)"
	rm -rf pack_is_complete_test
	[ -n "$_count" ] || return 1
	[ "$_count" -gt 0 ] 2>/dev/null || return 1
	echo "$_headsha"
}

# On return a "$lockf" will have been created that must be removed when gc is done
lock_gc() {
	# be compatible with gc.pid file from newer Git releases
	lockf=gc.pid
	hn="$(hostname)"
	active=
	if [ "$(createlock "$lockf")" ]; then
		# If $lockf is:
		# 1) less than 12 hours old
		# 2) contains two fields (pid hostname) NO trailing NL
		# 3) the hostname is different OR the pid is still alive
		# then we exit as another active process is holding the lock
		if [ "$(find "$lockf" -maxdepth 1 -mmin -720 -print 2>/dev/null)" ]; then
			apid=
			ahost=
			read -r apid ahost ajunk < "$lockf" || :
			if [ "$apid" ] && [ "$ahost" ]; then
				if [ "$ahost" != "$hn" ] || pidactive "$apid"; then
					active=1
				fi
			fi
		fi
	else
		echo >&2 "[$proj] unable to create gc.pid.lock file"
		exit 1
	fi
	if [ -n "$active" ]; then
		rm -f "$lockf.lock"
		echo >&2 "[$proj] gc already running on machine '$ahost' pid '$apid'"
		exit 1
	fi
	printf "%s %s" "$$" "$hn" > "$lockf.lock"
	chmod 0664 "$lockf.lock"
	mv -f "$lockf.lock" "$lockf"
}

# Remove any crud that's been left behind by interrupted operations
# that did not clean up after themselves
remove_crud() {
	# Remove any existing FETCH_HEAD
	# There can only be a FETCH_HEAD if we've been fetching, not if we've been
	# receiving pushes (those never create a FETCH_HEAD).
	# And if we're fetching because we're a mirror, we know we're not fetching right
	# now since jobd.pl never runs a project's fetch simultaneously with its gc.
	# Therefore any existing FETCH_HEAD is junk. And it may be many megabytes if
	# there were a lot of refs.
	rm -f FETCH_HEAD

	# Remove any stale pack remnants that are more than an hour old.
	# Stale pack fragments are defined as any pack-<sha1>.ext where .ext is NOT
	# .pack AND the corresponding .pack DOES NOT exist. A bunch of stale
	# pack-<sha1>.idx files without their corresponding .pack files are worthless
	# and just waste space. Normally there shouldn't be any remnants but actually
	# this can happen when things are interrupted at just the wrong time.
	# Note that the objects/pack directory is created by git init and should
	# always exist.
	find objects/pack -maxdepth 1 -type f -mmin +60 -name "pack-$octet20.?*" -print | \
	LC_ALL=C sed -e 's/^objects\/pack\/pack-//; s/\..*$//' | LC_ALL=C sort -u | \
	while read packsha; do
		[ ! -e "objects/pack/pack-$packsha.pack" ] || continue
		rm -f "objects/pack/pack-$packsha".?*
	done

	# Remove any stale pack .keep files that are more than 12 hours old.
	# We don't do anything to create any permanent pack .keep files, so they must
	# be remnants from some failed push or something. Removing the .keep will
	# allow the pack to be properly repacked.
	find objects/pack -maxdepth 1 -type f -mmin +720 -name "pack-$octet20.keep" -print0 | xargs -0 rm -f

	# Remove any stale tmp_pack_*, tmp_idx_*, tmp_bitmap_*, packtmp-* or .tmp-*-pack files
	# that are more than 12 hours old.
	find objects/pack -maxdepth 1 -type f -mmin +720 \( \
		-name "tmp_pack_?*" -o -name "tmp_idx_?*" -o -name "tmp_bitmap_?*" -o \
		-name "packtmp-?*" -o -name ".tmp-?*-pack" \
	\) -print0 | xargs -0 rm -f

	# Remove any stale shallow_* files that are more than 12 hours old.
	# These can be left behind by Git >= 1.8.4.2 and < 2.0.0 when a client
	# requests a shallow clone.
	find . -maxdepth 1 -type f -mmin +720 -name "shallow_?*" -print0 | xargs -0 rm -f

	# Remove any stale *.temp files in the objects area that are more than 12 hours old.
	# This can be stale sha1.temp, or stale *.pack.temp so we kill all stale *.temp.
	find objects -type f -mmin +720 -name "*.temp" -print0 | xargs -0 rm -f

	# Remove any stale *.lock files in the htmlcache area that might have been left
	# behind after an abnormal exit during an attempt to update a cached file and
	# are more than 1 hour old.
	! [ -d htmlcache ] || find htmlcache -type f -mmin +60 -name "*.lock" -print0 | xargs -0 rm -f

	# Remove any stale git-svn temp files that are more than 12 hours old.
	# The git-svn process creates temp files with random 10 character names
	# in the root of $GIT_DIR. Unfortunately they do not have a recognizable
	# prefix, so we just have to kill any files with a 10-character name. We
	# do this only for git-svn mirrors. All characters are chosen from
	# [A-Za-z0-9_] so we can at least check that and fortunately the only
	# collision is 'FETCH_HEAD' but that shouldn't matter.
	# There may also be temp files with a Git_ prefix as well.
	if is_svn_mirror; then
		_randchar='[A-Za-z0-9_]'
		_randchar2="$_randchar$_randchar"
		_randchar4="$_randchar2$_randchar2"
		_randchar10="$_randchar4$_randchar4$_randchar2"
		find . -maxdepth 1 -type f -mmin +720 -name "$_randchar10" -print0 | xargs -0 rm -f
		find . -maxdepth 1 -type f -mmin +720 -name "Git_*" -print0 | xargs -0 rm -f
	fi

	# Remove any stale fast_import_crash_<pid> files that are more than 3 days old.
	if is_gfi_mirror; then
		find . -maxdepth 1 -type f -mmin +4320 -name "fast_import_crash_?*" -print0 | xargs -0 rm -f
	fi
}

## Garbage Collection Types
##
## There are two kinds of possible garbage collection (gc) operations:
##
## 1. A normal, full gc
## 2. A "mini" gc
##
## If the full garbage collection interval has expired (or gc has never been
## run), then a normal, full gc will take place. Otherwise, a "mini" gc will
## take place if the file .needsgc exists.
##
## A "mini" gc is similar to "git gc --auto" in that it may not end up actually
## doing anything unless the right conditions are present so it's not a burden
## to run it often. If the file .needsgc exists, a "mini" gc will occur at
## the next opportunity.
##
## Note, however, that the .nogc file suppresses ALL gc activity (normal or mini).

proj="${1%.git}"
shift
cd "$cfg_reporoot/$proj.git"

# If git config --bool --get girocco.redelta is explicitly false then automatic
# redelta when there are fewer than $var_redelta_threshold objects will be suppressed.
# On the other hand, if git config --get girocco.redelta is "always" then, on a full
# gc only, for the final repack, deltas will always be recomputed.
# This can be set on a per-project basis to avoid unusual pathological gc behavior.
# Setting this will hurt efficiency of the affected repository.
# Note that fast-import packs ALWAYS get new deltas regardless of this setting.
noreusedeltaopt="--no-reuse-delta"
[ "$(git config --bool --get girocco.redelta 2>/dev/null || :)" != "false" ] || noreusedeltaopt=
alwaysredelta=
[ "$(git config --get girocco.redelta 2>/dev/null || :)" != "always" ] || alwaysredelta=1

trap 'e=$?; rm -f .gc_in_progress; if [ $e != 0 ]; then echo "gc failed dir: $PWD" >&2; fi' EXIT
trap 'exit 130' INT
trap 'exit 143' TERM

# date -R is linux-only, POSIX equivalent is '+%a, %d %b %Y %T %z'
datefmt='+%a, %d %b %Y %T %z'
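# e.g. "Mon, 02 Mar 2015 15:04:05 +0000"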

isminigc=
if check_interval lastgc $cfg_min_gc_interval; then
	if [ -e .needsgc ]; then
		isminigc=1
	else
		progress "= [$proj] garbage check skip (last at $(config_get lastgc))"
		exit 0
	fi
fi
if [ -e .nogc ]; then
	progress "x [$proj] garbage check disabled"
	exit 0
fi

if [ -n "$isminigc" ]; then
	# Perform a "mini" gc
	# Note that .delaygc is ignored here as that's only intended for full gc
	lock_gc
	rm -f .allowgc .needsgc
	remove_crud
	miniactive=
	if [ -f .svnpack ] && is_svn_mirror; then
		miniactive=1
		progress "+ [$proj] mini garbage check (`date`)"
		make_svn_pack
	fi
	# If there aren't at least 10 non-keep, non-bitmap, non-bndl packs then
	# don't actually process them yet
	lpo="--exclude-no-idx --exclude-keep --exclude-bitmap --exclude-bndl --quiet"
	packcnt="$(list_packs --count $lpo objects/pack || :)"
	if [ "${packcnt:-0}" -ge 10 ]; then
		if [ -z "$miniactive" ]; then
			miniactive=1
			progress "+ [$proj] mini garbage check (`date`)"
		fi
		if [ -f gfi-packs -a -s gfi-packs ] && is_gfi_mirror; then
			repack_gfi_packs
			packcnt="$(list_packs --count $lpo objects/pack || :)"
		fi
		# if repack_gfi_packs dropped the pack count to < 10 don't combine
		if [ "${packcnt:-0}" -ge 10 ]; then
			combine_small_packs
			packcnt="$(list_packs --count $lpo objects/pack || :)"
		fi
		# if we still have at least 10 packs trigger a full gc
		if [ "${packcnt:-0}" -ge 10 ]; then
			# We shouldn't be in a .delaygc state at this point, but if
			# we are then nuke it because we really need a full gc now
			rm -f .delaygc
			git config --unset gitweb.lastgc
			rm -f "$lockf"
			progress "- [$proj] mini garbage check triggering full gc too many packs (`date`)"
			exit 0
		fi
	fi
	rm -f "$lockf"
	if [ -n "$miniactive" ]; then
		git update-server-info
		progress "- [$proj] mini garbage check (`date`)"
	else
		progress "= [$proj] mini garbage check nothing but crud removal to do (`date`)"
	fi
	exit 0
fi

# Avoid unnecessary garbage collections:
#   1. If lastreceive is set and is older than lastgc
#      -AND-
#   2. We are not a fork (! -s alternates) -OR- lastparentgc is older than lastgc
#
# If lastgc is NOT set or lastreceive is NOT set we MUST run gc
# If we are a fork and lastparentgc is NOT set we MUST run gc
#
# If the repo is dirty after removing any crud we MUST run gc

gcstart="$(date "$datefmt")"
skipgc=
isfork=
[ -s objects/info/alternates ] && isfork=1
lastparentgcsecs=
[ -n "$isfork" ] && lastparentgcsecs="$(config_get_date_seconds lastparentgc || :)"
lastreceivesecs=
if lastreceivesecs="$(config_get_date_seconds lastreceive)" && \
	lastgcsecs="$(config_get_date_seconds lastgc)" && \
	[ $lastreceivesecs -lt $lastgcsecs ]; then
	# We've run gc since we last received, so maybe we can skip,
	# check if not fork or fork and lastparentgc < lastgc
	if [ -n "$isfork" ]; then
		if [ -n "$lastparentgcsecs" ] && \
			[ $lastparentgcsecs -lt $lastgcsecs ]; then
			# We've run gc since our parent ran gc so we can skip
			skipgc=1
		fi
	else
		# We don't have any alternates (we're not a fork) so we can skip
		skipgc=1
	fi
fi

# Prevent any other simultaneous gc operations
lock_gc

# At this point, if .allowgc exists, it's now crud to be removed
rm -f .allowgc

# Always get rid of crud
remove_crud

# Run 'git svn gc' now for svn mirrors
if is_svn_mirror; then
	git svn gc || :
fi

# Skip the actual gc if .delaygc is set
if [ -e .delaygc ]; then
	progress "x [$proj] garbage check delayed (except for crud removal)"
	rm -f "$lockf"
	exit 0
fi

# Do not skip gc if the repo is dirty
if [ -n "$skipgc" ] && ! is_dirty; then
	progress "= [$proj] garbage check nothing but crud removal to do (`date`)"
	config_set lastgc "$gcstart"
	rm -f "$lockf"
	exit 0
fi

bumptime=
if [ -n "$isfork" ] && [ -z "$lastparentgcsecs" ]; then
	# set lastparentgc and then update gcstart to be at least 1 second later
	config_set lastparentgc "$gcstart"
	bumptime=1
fi
if [ -z "$lastreceivesecs" ]; then
	# set lastreceive and then update gcstart to be at least 1 second later
	config_set lastreceive "$gcstart"
	bumptime=1
fi
if [ -n "$bumptime" ]; then
	sleep 1
	gcstart="$(date "$datefmt")"
fi

progress "+ [$proj] garbage check (`date`)"

newdeltas=
[ -z "$alwaysredelta" ] || newdeltas=-f
if [ -z "$newdeltas" ] && [ -f gfi-packs -a -s gfi-packs ] && is_gfi_mirror; then
	if [ $(list_packs --exclude-no-idx --count objects/pack) -le \
		$(list_packs --exclude-no-idx --count --quiet --only gfi-packs) ]; then
		# Don't bother with repack_gfi_packs since everything's being repacked
		newdeltas=-f
	fi
fi
if [ -z "$newdeltas" ] && [ -n "$noreusedeltaopt" ] && \
	[ $(list_packs --exclude-no-idx --count-objects objects/pack) -le $var_redelta_threshold ]; then
	# There aren't enough objects to worry about so just redelta to get the best pack
	newdeltas=-f
fi
if [ -z "$newdeltas" ] || has_forks "$proj"; then
	# Since we're not going to recompute deltas overall, we need to do the "mini"
	# maintenance and by doing it before we copy objects down to forks we reduce
	# the amount that gets sprayed into the forks' objects directories.
	# If we have forks we always need to do the "mini" maintenance, even if we are
	# recomputing all deltas, in order to avoid having suboptimal packs in the forks.
	make_svn_pack
	repack_gfi_packs
	combine_small_packs 1
fi

# safe pruning: we put all our objects in all forks, then we can
# safely get rid of extra ones; repacks in forks will get rid of
# the redundant ones again then; we carefully grab only loose
# objects and pack .idx and .pack files
forkdir="$proj"
if [ -d "../${forkdir##*/}" ]; then
	# It is enough to copy objects just one level down and get_repo_list
	# takes a regular expression (which is automatically prefixed with '^')
	# so we can easily match forks exactly one level down from this project
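	# e.g. for project "foo" the pattern "foo/[^/]*:" matches fork
	# "foo/bar" but not "foo/bar/baz" (hypothetical fork names)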
	get_repo_list "$forkdir/[^/]*:" |
	while read fork; do
		# Ignore forks that do not exist or are symbolic links
		[ ! -L "$cfg_reporoot/$fork.git" -a -d "$cfg_reporoot/$fork.git" ] || \
			continue
		# Or do not have a non-zero length alternates file
		[ -s "$cfg_reporoot/$fork.git/objects/info/alternates" ] || \
			continue
		# Match objects in parent project
		for d in objects/??; do
			[ "$d" != "objects/??" ] || continue
			mkdir -p "$cfg_reporoot/$fork.git/$d"
			ln -f "$d"/* "$cfg_reporoot/$fork.git/$d" || :
		done
		# Match packs in parent project
		mkdir -p "$cfg_reporoot/$fork.git/objects/pack"
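		# the != test below succeeds only if the glob actually expanded,
		# i.e. at least one pack-*.idx exists (an unmatched glob echoes
		# back its literal pattern unchanged)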
		if [ "$(echo objects/pack/pack-*.idx)" != \
			"objects/pack/pack-*.idx" ]; then
			ln -f objects/pack/pack-*.pack "$cfg_reporoot/$fork.git/objects/pack" || :
			ln -f objects/pack/pack-*.idx "$cfg_reporoot/$fork.git/objects/pack" || :
			if ! [ -e "$cfg_reporoot/$fork.git/.needsgc" ]; then
				# Trigger a mini gc in the fork if it now has too many packs
				packs="$(list_packs --quiet --count --exclude-no-idx "$cfg_reporoot/$fork.git/objects/pack" || :)"
				if [ -n "$packs" ] && [ "$packs" -ge 20 ]; then
					>"$cfg_reporoot/$fork.git/.needsgc"
				fi
			fi
			git --git-dir="$cfg_reporoot/$fork.git" update-server-info
		fi
		# Update the fork's lastparentgc date (must be current, not $gcstart)
		git --git-dir="$cfg_reporoot/$fork.git" config \
			gitweb.lastparentgc "$(date "$datefmt")"
	done
fi

git pack-refs --all
touch .gc_in_progress
rm -f .gc_failed bundles/*
rm -f objects/pack/pack-*.bndl
# We use the -A option with git repack so that unreachable objects can live
# on for a time as loose objects. This is particularly helpful if we just
# happen to be in the process of sending out a ref update for a ref that was
# force updated and the old ref value would have otherwise been removed by
# repack because it was now unreachable. Admittedly the window for gc to run
# and do that before we manage to send out the ref update is not large, but
# it would not be difficult to create such a situation. Unfortunately, when
# Git unpacks these unreachable objects it will give them the modification
# time of the *.pack file they came out of. This could be very, very old.
# If that happens, the subsequent git prune --expire some_time_ago will still
# remove the object(s) and our pending ref update will still lose out.
# To prevent this from happening and to get the behavior we want, we now
# touch the modification time of all pack-<sha>.pack files so that any
# loosened objects get a current time. Git does not provide any other
# mechanism to do this. We do not want to just touch all loose objects
# left after the repack because that would cause objects that were loosened
# previously to live on which we definitely do not want.
list_packs --exclude-no-idx objects/pack | xargs touch -c 2>/dev/null || :
# We wish to keep deltas from our last full pack so if we're not redeltaing
# then make sure the .pack associated with the .bitmap has a newer mod time
# (If there is no .bitmap then touch the pack with the most objects instead.)
if [ -z "$newdeltas" ]; then
	bmpack="$(list_packs --exclude-no-bitmap --exclude-no-idx --max-matches 1 objects/pack)"
	[ -n "$bmpack" ] || bmpack="$(list_packs --exclude-no-idx --max-matches 1 --object-limit -1 --include-boundary objects/pack)"
	if [ -n "$bmpack" ] && [ -f "$bmpack" -a -s "$bmpack" ]; then
		sleep 1
		touch -c "$bmpack" 2>/dev/null || :
	fi
fi
# The git repack command may issue a 'disabling bitmap' warning for some
# repositories. This is perfectly normal and should be suppressed unless
# show_progress is set. Unfortunately that means we have to grep -v the
# output. And furthermore, since it's a translated message, we have to
# force the language to English to be sure we do it.
repackcmd="git repack $packopts -A -d -l $quiet $newdeltas $@"
[ -n "$show_progress" ] || \
	repackcmd="{ LC_ALL=C $repackcmd 2>&1 || touch .gc_failed; } | LC_ALL=C grep -v 'disabling bitmap' || :"
eval "$repackcmd"
[ ! -e .gc_failed ] || exit 1
# These, if they exist, are now meaningless and need to be removed
rm -f gfi-packs .needsgc .svnpack .svnpackgc
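# $octet20 below is a glob pattern for 40 hex digits (from shlib.sh), so the
# unquoted expansion lists every pack file name; after a successful full
# repack that is normally just the single new all-in-one pack.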
allpacks="$(echo objects/pack/pack-$octet20.pack)"
curhead="$(cat HEAD)"
pkrf=
[ ! -e packed-refs ] || pkrf=packed-refs
eval "reposizek=$(( $(echo 0 $(du -k $pkrf $allpacks 2>/dev/null | LC_ALL=C awk '{print $1}') | \
	LC_ALL=C sed -e 's/ / + /g') ))"
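# For example, if du -k reports sizes 8 and 2040 (hypothetical sizes), the
# pipeline yields the expression "0 + 8 + 2040" and reposizek becomes 2048.
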
# The -A option to `git repack` may have caused some loose objects to pop
# out of their packs. We must make these objects group writable so that they
# can be freshened by other pushers. Technically we need only do this for
# push projects but to enable mirror projects to be more easily converted to
# push projects, we go ahead and do it for all projects.
{ find objects/$octet -type f -name "$octet19" -print0 | xargs -0 chmod ug+w || :; } 2>/dev/null

# The git prune command does not take a -q or --quiet but started outputting
# 'Checking connectivity' progress messages in v1.7.9. However, we can
# suppress those by piping through cat as it only activates the progress
# messages when stderr is a tty. We only expire loose objects older than one
# day just in case there's some pending action (such as sending out a ref
# update) in progress that might want to examine them. This may leave us with
# loose objects. That's okay because at the next gc interval, we will always
# run gc if we see any loose objects regardless of whether or not we've seen
# any updates or we've received new linked objects from our parent. Note that
# in order to keep loose objects that just recently became unreferenced but
# have a very old modification date around we rely on some help from both the
# update.sh and hooks/pre-receive scripts. Furthermore, since Git v2.2.0
# (d3038d22 prune: keep objects reachable from recent objects) an unreachable
# object that would otherwise be pruned (because it's too old) will be kept
# alive by an unreachable object that refers to it that's not old enough to
# be pruned yet.
prunecmd='git prune --expire 1_day_ago'
[ -n "$show_progress" ] || \
	prunecmd="{ $prunecmd 2>&1 || touch .gc_failed; } | cat"
eval "$prunecmd"
[ ! -e .gc_failed ] || exit 1
git update-server-info

# darcs:// mirrors have a xxx.log file that will grow endlessly
# if this is a mirror and the file exists, shorten it to 10000 lines
# also take this opportunity to optimize the darcs repo
if [ ! -e .nofetch ] && [ -n "$cfg_mirror" ]; then
	url="$(config_get baseurl || :)"
	case "$url" in darcs://*)
		if [ -n "$cfg_mirror_darcs" ]; then
			url="${url%/}"
			basedarcs="$(basename "${url#darcs:/}")"
			if [ -f "$basedarcs.log" ]; then
				tail -n 10000 "$basedarcs.log" > "$basedarcs.log.$$"
				mv -f "$basedarcs.log.$$" "$basedarcs.log"
			fi
			if [ -d "$basedarcs.darcs" ]; then
				(
					cd "$basedarcs.darcs"
					# without show_progress suppress non-error output
					[ -n "$show_progress" ] || exec >/dev/null
					# Note that this does not optimize _darcs/inventories/ :(
					darcs optimize || :
				)
			fi
		fi
	esac
fi

# Create a matching .bndl header file for the all-in-one pack we just created
# but only if we're not a fork (otherwise the bundle would not be complete)
# and we are running at least Git version 1.7.2 (pack_is_complete always fails otherwise)
if [ ! -s objects/info/alternates ] && [ -n "$var_have_git_172" ]; then
	# There should only be one pack in $allpacks but if there was a
	# simultaneous push...
	# The one we just created will have a .idx and will NOT have a .keep
	pkfound=
	pkhead=
	for pk in $allpacks; do
		[ -s "$pk" ] || continue
		pkbase="${pk%.pack}"
		[ -s "$pkbase.idx" ] || continue
		[ ! -e "$pkbase.keep" ] || continue
		if pkhead="$(pack_is_complete "$PWD/$pk" "$PWD/packed-refs" "$curhead")"; then
			pkfound="$pkbase"
			break
		fi
	done
	if [ -n "$pkfound" -a -n "$pkhead" ]; then
		{
			echo "# v2 git bundle"
			LC_ALL=C sed -ne "/^$octet20 refs\/[^ $tab]*\$/ p" < packed-refs
			echo "$pkhead HEAD"
			echo ""
		} > "$pkbase.bndl"
		bndletag="$("$cfg_basedir/bin/rangecgi" --etag -m 1 "$pkbase.bndl" "$pkbase.pack" || :)"
		bndlsha="$(printf '%s' "$bndletag" | git hash-object --stdin || :)"
		if [ -n "$bndletag" ]; then
			case "$bndlsha" in $octet20)
				bndlshatrailer="${bndlsha#????????}"
				bndlshaprefix="${bndlsha%$bndlshatrailer}"
				bndlname="$(TZ=UTC date +%Y%m%d_%H%M%S)-${bndlshaprefix:-0}"
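				# bndlshaprefix is the first 8 hex digits of bndlsha,
				# so bndlname is e.g. "20150302_150405-0abcd123"
				# (hypothetical timestamp and prefix)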
				[ -d bundles ] || mkdir bundles
				echo "${pkbase#objects/pack/}.bndl" > "bundles/$bndlname"
				echo "${pkbase#objects/pack/}.pack" >> "bundles/$bndlname"
				ln -s -f -n "$bndlname" bundles/latest
			esac
		fi
	fi
fi

# Record the size of this repo as the sum of its *.pack sizes as 1024-byte blocks
config_set_raw girocco.reposizek "${reposizek:-0}"

# We use $gcstart here to avoid a race where a push occurs during the gc itself
# and the next future gc could be incorrectly skipped if we used the current
# timestamp here instead
config_set lastgc "$gcstart"
rm -f "$lockf"

progress "- [$proj] garbage check (`date`)"