#!/bin/sh

# NOTE: additional options can be passed to git repack by specifying
# them after the project name, for example:
#   gc.sh my-project -f

. @basedir@/shlib.sh

set -e

if [ $# -lt 1 ]; then
	echo "Usage: gc.sh projname [extra-repack-args]" >&2
	exit 1
fi

# packing options
packopts="--depth=50 --window=50 --window-memory=${var_window_memory:-1g}"
quiet=; [ -n "$show_progress" ] || quiet=-q

umask 002
[ "$cfg_permission_control" != "Hooks" ] || umask 000
clean_git_env
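
# pidactive <pid>
# Succeed if the process may still be active; "kill -0" sends no signal,
# it merely probes whether the pid exists and whether we may signal it.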
pidactive() {
	if _result="$(kill -0 "$1" 2>&1)"; then
		# process exists and we have permission to signal it
		return 0
	fi
	case "$_result" in *"not permitted"*)
		# we do not have permission to signal the process
		return 0
	esac
	# process does not exist
	return 1
}

createlock() {
	# A .lock file should only exist for much less than a second.
	# If we see a stale lock file (> 1h old), remove it and then,
	# just in case, wait 30 seconds for any process whose .lock
	# we might have just removed (it's racy) to finish doing what
	# should take much less than a second to do.
	_stalelock="$(find "$1.lock" -maxdepth 1 -mmin +60 -print 2>/dev/null || :)"
	if [ -n "$_stalelock" ]; then
		rm -f "$_stalelock"
		sleep 30
	fi
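	# "set -C" (noclobber) in a subshell makes the ">" redirection fail if
	# the file already exists, so creating the .lock file is atomic.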
	for _try in p p n; do
		if (set -C; > "$1.lock") 2>/dev/null; then
			echo "$1.lock"
			return 0
		fi
		# delay and try again
		[ "$_try" != "p" ] || sleep 1
	done
	# cannot create lock file
	return 1
}

# return true if there's more than one objects/pack-<sha>.pack file or
# ANY sha-1 files in objects
is_dirty() {
	_packs=$(find objects/pack -type f -name "pack-$octet20.pack" -print | head -n 2 | LC_ALL=C wc -l)
	if [ $_packs != 1 ] && [ $_packs != 0 ]; then
		return 0
	fi
	_objs=$(find objects/$octet -type f -name "$octet19" -print 2>/dev/null | head -n 1 | LC_ALL=C wc -l)
	[ $_objs -ne 0 ]
}

# make sure combine-packs uses the correct Git executable
run_combine_packs() {
	PATH="$var_git_exec_path:$cfg_basedir/bin:$PATH" @basedir@/jobd/combine-packs.sh "$@"
}

# combine the input pack(s) into a new pack (or possibly packs if packSizeLimit set)
# input pack names are read from standard input one per line delimited by the first
# ':', ' ' or '\n' character on the line (which allows gfi-packs to be read directly)
# all arguments, if any, are passed to pack-objects as additional options
# returns non-zero on failure AND creates .gc_failed in that case
combine_packs() {
	rm -f .gc_failed
	find objects/pack -maxdepth 1 -type f -name '*.zap*' -print0 | xargs -0 rm -f
	run_combine_packs --replace "$@" $packopts --all-progress-implied $quiet --non-empty || {
		>.gc_failed
		return 1
	}
	return 0
}

# if the current directory is_gfi_mirror then repack all packs listed in gfi-packs
repack_gfi_packs() {
	[ -n "$gfi_mirror" ] || return 0
	[ -d objects/pack ] || { rm -f gfi-packs; return 0; }
	progress "~ [$proj] redeltifying poor quality git fast-import packs"
	combine_packs --ignore-missing --no-reuse-delta <gfi-packs
	rm -f gfi-packs
	return 0
}

# combine small packs into larger pack(s)
# we avoid any keep, bndl or bitmap packs
# if the optional argument is non-empty even a single small pack will be redeltaed
combine_small_packs() {
	_didprogress=
	_minsmallpacks=2
	if [ -n "$1" ] && [ -n "$noreusedeltaopt" ]; then
		_minsmallpacks=1
	fi
	_lpo="--exclude-no-idx --exclude-keep --exclude-bitmap --exclude-bndl --quiet"
	_lpo="$_lpo --object-limit $var_redelta_threshold objects/pack"
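	# Note: in "while list; do", the loop keeps running as long as the last
	# command of the list (the "test ... -ge" below) succeeds.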
	while
		_cnt="$(list_packs --count $_lpo || :)"
		test "${_cnt:-0}" -ge $_minsmallpacks
	do
		[ -n "$_didprogress" ] || {
			progress "~ [$proj] combining small packs into a single larger pack"
			_didprogress=1
		}
		_newp="$(list_packs $_lpo | combine_packs --names $noreusedeltaopt)"
		_newc="$(echo $(echo "$_newp" | LC_ALL=C wc -w))"
		# be paranoid and exit the loop if we haven't reduced the number of packs
		[ $_newc -lt $_cnt ] || break
		_minsmallpacks=2
	done
	return 0
}

# Unfortunately git-svn lacks the ability to store newly fetched revisions as a pack.
# However, the fetch code conveniently sets .svnpack just before it runs git-svn fetch
# so that it's easy to find all the objects that have been fetched by git-svn and
# combine them into a pack.  The --no-reuse-delta option is meaningless here since
# everything to be packed is a loose object and therefore not a delta so deltification
# will always take place.
make_svn_pack() {
	[ -f .svnpack ] && [ -n "$svn_mirror" ] || return 0
	rm -f .svnpackgc
	mv -f .svnpack .svnpackgc
	progress "~ [$proj] combining loose git-svn objects into a pack"
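	# Loose objects newer than the just-renamed .svnpackgc marker file are
	# the ones git-svn has written since .svnpack was created; pack those.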
	_newp="$(find objects/$octet -maxdepth 1 -type f -newer .svnpackgc -name "$octet19" -print 2>/dev/null |
		LC_ALL=C awk -F / '{print $2 $3}' |
		run_combine_packs --objects --names $packopts --incremental --all-progress-implied $quiet --non-empty)" || {
		mv -f .svnpackgc .svnpack
		>.gc_failed
		return 1
	}
	if [ -n "$_newp" ]; then
		# remove the now-redundant loose objects -- this is always safe
		# even during a concurrent push because a reprepare_packed_git
		# will be triggered if an object that should be there is not
		# found thereby finding it in the new pack instead
		git prune-packed $quiet
	fi
	rm -f .svnpackgc
}

# HEADSHA="$(pack_is_complete /full/path/to/some.pack /full/path/to/packed-refs "$(cat HEAD)")"
pack_is_complete() {
	# Must have a matching .idx file and a non-empty packed-refs file
	[ -s "${1%.pack}.idx" ] || return 1
	[ -s "$2" ] || return 1
	_headsha=
	case "$3" in
		$octet20)
			_headsha="$3"
			;;
		"ref: refs/"?*|"ref:refs/"?*|"refs/"?*)
			_headmatch="${3#ref:}"
			_headmatch="${_headmatch# }"
			_headmatchpat="$(echo "$_headmatch" | LC_ALL=C sed -e 's/\([.$]\)/\\\1/g')"
			_headsha="$(LC_ALL=C grep -e "^$octet20 $_headmatchpat\$" < "$2" |
				LC_ALL=C cut -d ' ' -f 1)"
			case "$_headsha" in $octet20) :;; *)
				return 1
			esac
			;;
		*)
			# bad HEAD
			return 1
			;;
	esac
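	# Assemble a throwaway repository skeleton whose object store contains
	# only the candidate pack; if rev-list can count commits from --all in
	# there, the pack holds a complete closure of every packed ref.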
	rm -rf pack_is_complete_test
	mkdir pack_is_complete_test
	mkdir pack_is_complete_test/refs
	mkdir pack_is_complete_test/objects
	mkdir pack_is_complete_test/objects/pack
	echo "$_headsha" > pack_is_complete_test/HEAD
	ln -s "$1" pack_is_complete_test/objects/pack/
	ln -s "${1%.pack}.idx" pack_is_complete_test/objects/pack/
	ln -s "$2" pack_is_complete_test/packed-refs
	_count="$(git --git-dir=pack_is_complete_test rev-list --count --all 2>/dev/null || :)"
	rm -rf pack_is_complete_test
	[ -n "$_count" ] || return 1
	[ "$_count" -gt 0 ] 2>/dev/null || return 1
	echo "$_headsha"
}

# On return a "$lockf" will have been created that must be removed when gc is done
lock_gc() {
	# be compatible with gc.pid file from newer Git releases
	lockf=gc.pid
	hn="$(hostname)"
	active=
	if [ "$(createlock "$lockf")" ]; then
		# If $lockf is:
		#   1) less than 12 hours old
		#   2) contains two fields (pid hostname) NO trailing NL
		#   3) the hostname is different OR the pid is still alive
		# then we exit as another active process is holding the lock
		if [ "$(find "$lockf" -maxdepth 1 -mmin -720 -print 2>/dev/null)" ]; then
			apid=
			ahost=
			read -r apid ahost ajunk < "$lockf" || :
			if [ "$apid" ] && [ "$ahost" ]; then
				if [ "$ahost" != "$hn" ] || pidactive "$apid"; then
					active=1
				fi
			fi
		fi
	else
		echo >&2 "[$proj] unable to create gc.pid.lock file"
		exit 1
	fi
	if [ -n "$active" ]; then
		rm -f "$lockf.lock"
		echo >&2 "[$proj] gc already running on machine '$ahost' pid '$apid'"
		exit 1
	fi
	printf "%s %s" "$$" "$hn" > "$lockf.lock"
	chmod 0664 "$lockf.lock"
	mv -f "$lockf.lock" "$lockf"
}

# Remove any crud that's been left behind by interrupted operations
# that did not clean up after themselves
remove_crud() {
	# Remove any existing FETCH_HEAD
	# There can only be a FETCH_HEAD if we've been fetching, not if we've been
	# receiving pushes (those never create a FETCH_HEAD).
	# And if we're fetching because we're a mirror, we know we're not fetching right
	# now since jobd.pl never runs a project's fetch simultaneously with its gc.
	# Therefore any existing FETCH_HEAD is junk.  And it may be many megabytes if
	# there were a lot of refs.
	rm -f FETCH_HEAD

	# Remove any stale pack remnants that are more than an hour old.
	# Stale pack fragments are defined as any pack-<sha1>.ext where .ext is NOT
	# .pack AND the corresponding .pack DOES NOT exist.  A bunch of stale
	# pack-<sha1>.idx files without their corresponding .pack files are worthless
	# and just waste space.  Normally there shouldn't be any remnants but actually
	# this can happen when things are interrupted at just the wrong time.
	# Note that the objects/pack directory is created by git init and should
	# always exist.
	find objects/pack -maxdepth 1 -type f -mmin +60 -name "pack-$octet20.?*" -print |
		LC_ALL=C sed -e 's/^objects\/pack\/pack-//; s/\..*$//' | LC_ALL=C sort -u |
		while read packsha; do
			[ ! -e "objects/pack/pack-$packsha.pack" ] || continue
			rm -f "objects/pack/pack-$packsha".?*
		done

	# Remove any stale pack .keep files that are more than 12 hours old.
	# We don't do anything to create any permanent pack .keep files, so they must
	# be remnants from some failed push or something.  Removing the .keep will
	# allow the pack to be properly repacked.
	find objects/pack -maxdepth 1 -type f -mmin +720 -name "pack-$octet20.keep" -print0 | xargs -0 rm -f

	# Remove any stale tmp_pack_*, tmp_idx_*, tmp_bitmap_*, packtmp-* or .tmp-*-pack files
	# that are more than 12 hours old.
	find objects/pack -maxdepth 1 -type f -mmin +720 \( \
		-name "tmp_pack_?*" -o -name "tmp_idx_?*" -o -name "tmp_bitmap_?*" -o \
		-name "packtmp-?*" -o -name ".tmp-?*-pack" \
		\) -print0 | xargs -0 rm -f

	# Remove any stale shallow_* files that are more than 12 hours old.
	# These can be left behind by Git >= 1.8.4.2 and < 2.0.0 when a client
	# requests a shallow clone.
	find . -maxdepth 1 -type f -mmin +720 -name "shallow_?*" -print0 | xargs -0 rm -f

	# Remove any stale *.temp files in the objects area that are more than 12 hours old.
	# This can be stale sha1.temp, or stale *.pack.temp so we kill all stale *.temp.
	find objects -type f -mmin +720 -name "*.temp" -print0 | xargs -0 rm -f

	# Remove any stale *.lock files in the htmlcache area that might have been left
	# behind after an abnormal exit during an attempt to update a cached file and
	# are more than 1 hour old.
	! [ -d htmlcache ] || find htmlcache -type f -mmin +60 -name "*.lock" -print0 | xargs -0 rm -f

	# Remove any stale git-svn temp files that are more than 12 hours old.
	# The git-svn process creates temp files with random 10 character names
	# in the root of $GIT_DIR.  Unfortunately they do not have a recognizable
	# prefix, so we just have to kill any files with a 10-character name.  We
	# do this only for git-svn mirrors.  All characters are chosen from
	# [A-Za-z0-9_] so we can at least check that and fortunately the only
	# collision is 'FETCH_HEAD' but that shouldn't matter.
	# There may also be temp files with a Git_ prefix as well.
	if [ -n "$svn_mirror" ]; then
		_randchar='[A-Za-z0-9_]'
		_randchar2="$_randchar$_randchar"
		_randchar4="$_randchar2$_randchar2"
		_randchar10="$_randchar4$_randchar4$_randchar2"
		find . -maxdepth 1 -type f -mmin +720 -name "$_randchar10" -print0 | xargs -0 rm -f
		find . -maxdepth 1 -type f -mmin +720 -name "Git_*" -print0 | xargs -0 rm -f
	fi

	# Remove any stale fast_import_crash_<pid> files that are more than 3 days old.
	if [ -n "$gfi_mirror" ]; then
		find . -maxdepth 1 -type f -mmin +4320 -name "fast_import_crash_?*" -print0 | xargs -0 rm -f
	fi
}

## Garbage Collection Types
##
## There are two kinds of possible garbage collection (gc) operations:
##
##   1. A normal, full gc
##   2. A "mini" gc
##
## If the full garbage collection interval has expired (or gc has never been
## run), then a normal, full gc will take place.  Otherwise, a "mini" gc will
## take place if the file .needsgc exists.
##
## A "mini" gc is similar to "git gc --auto" in that it may not end up actually
## doing anything unless the right conditions are present so it's not a burden
## to run it often.  If the file .needsgc exists, a "mini" gc will occur at
## the next opportunity.
##
## Note, however, that the .nogc file suppresses ALL gc activity (normal or mini).
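##
## Rough decision sketch (the code below is authoritative):
##
##   .nogc exists                  -> no gc at all
##   gc interval not yet expired   -> "mini" gc if .needsgc exists, else skip
##   otherwise                     -> a normal, full gc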

proj="${1%.git}"
shift
cd "$cfg_reporoot/$proj.git"
[ -d objects/pack ] || { rm -f gfi-packs; mkdir -p objects/pack; }
mirror_url="$(get_mirror_url)"
svn_mirror=
! is_svn_mirror_url "$mirror_url" || svn_mirror=1
gfi_mirror=
if [ -f gfi-packs -a -s gfi-packs ] && is_gfi_mirror_url "$mirror_url"; then
	gfi_mirror=1
fi

# If git config --bool --get girocco.redelta is explicitly false then automatic
# redelta when there are fewer than $var_redelta_threshold objects will be suppressed.
# On the other hand, if git config --get girocco.redelta is "always" then, on a full
# gc only, for the final repack, deltas will always be recomputed.
# This can be set on a per-project basis to avoid unusual pathological gc behavior.
# Setting this will hurt efficiency of the affected repository.
# Note that fast-import packs ALWAYS get new deltas regardless of this setting.
noreusedeltaopt="--no-reuse-delta"
[ "$(git config --bool --get girocco.redelta 2>/dev/null || :)" != "false" ] || noreusedeltaopt=
alwaysredelta=
[ "$(git config --get girocco.redelta 2>/dev/null || :)" != "always" ] || alwaysredelta=1

trap 'e=$?; rm -f .gc_in_progress; if [ $e != 0 ]; then echo "gc failed dir: $PWD" >&2; fi' EXIT
trap 'exit 130' INT
trap 'exit 143' TERM

# date -R is linux-only, POSIX equivalent is '+%a, %d %b %Y %T %z'
datefmt='+%a, %d %b %Y %T %z'
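# e.g. "Thu, 01 Jan 1970 00:00:00 +0000"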

isminigc=
if check_interval lastgc $cfg_min_gc_interval; then
	if [ -e .needsgc ]; then
		isminigc=1
	else
		progress "= [$proj] garbage check skip (last at $(config_get lastgc))"
		exit 0
	fi
fi
if [ -e .nogc ]; then
	progress "x [$proj] garbage check disabled"
	exit 0
fi

if [ -n "$isminigc" ]; then
	# Perform a "mini" gc
	# Note that .delaygc is ignored here as that's only intended for full gc
	lock_gc
	rm -f .allowgc .needsgc
	remove_crud
	miniactive=
	if [ -f .svnpack ] && [ -n "$svn_mirror" ]; then
		miniactive=1
		progress "+ [$proj] mini garbage check (`date`)"
		make_svn_pack
	fi
	if [ -z "$cfg_delay_gfi_redelta" ] && [ -n "$gfi_mirror" ]; then
		# $Girocco::Config::delay_gfi_redelta is false, force redeltification now
		if [ -z "$miniactive" ]; then
			miniactive=1
			progress "+ [$proj] mini garbage check (`date`)"
		fi
		repack_gfi_packs
	fi
	# If there aren't at least 10 non-keep, non-bitmap, non-bndl packs then
	# don't actually process them yet
	lpo="--exclude-no-idx --exclude-keep --exclude-bitmap --exclude-bndl --quiet"
	packcnt="$(list_packs --count $lpo objects/pack || :)"
	if [ "${packcnt:-0}" -ge 10 ]; then
		if [ -z "$miniactive" ]; then
			miniactive=1
			progress "+ [$proj] mini garbage check (`date`)"
		fi
		if [ -n "$gfi_mirror" ]; then
			repack_gfi_packs
			packcnt="$(list_packs --count $lpo objects/pack || :)"
		fi
		# if repack_gfi_packs dropped the pack count to < 10 don't combine
		if [ "${packcnt:-0}" -ge 10 ]; then
			combine_small_packs
			packcnt="$(list_packs --count $lpo objects/pack || :)"
		fi
		# if we still have 10 or more packs trigger a full gc
		if [ "${packcnt:-0}" -ge 10 ]; then
			# We shouldn't be in a .delaygc state at this point, but if
			# we are then nuke it because we really need a full gc now
			rm -f .delaygc
			git config --unset gitweb.lastgc
			rm -f "$lockf"
			progress "- [$proj] mini garbage check triggering full gc too many packs (`date`)"
			exit 0
		fi
	fi
	rm -f "$lockf"
	if [ -n "$miniactive" ]; then
		git update-server-info
		progress "- [$proj] mini garbage check (`date`)"
	else
		progress "= [$proj] mini garbage check nothing but crud removal to do (`date`)"
	fi
	exit 0
fi

# Avoid unnecessary garbage collections:
#   1. If lastreceive is set and is older than lastgc
#      -AND-
#   2. We are not a fork (! -s alternates) -OR- lastparentgc is older than lastgc
#
# If lastgc is NOT set or lastreceive is NOT set we MUST run gc
# If we are a fork and lastparentgc is NOT set we MUST run gc
#
# If the repo is dirty after removing any crud we MUST run gc

gcstart="$(date "$datefmt")"
skipgc=
isfork=
[ -s objects/info/alternates ] && isfork=1
lastparentgcsecs=
[ -n "$isfork" ] && lastparentgcsecs="$(config_get_date_seconds lastparentgc || :)"
lastreceivesecs=
if lastreceivesecs="$(config_get_date_seconds lastreceive)" &&
	lastgcsecs="$(config_get_date_seconds lastgc)" &&
	[ $lastreceivesecs -lt $lastgcsecs ]; then
	# We've run gc since we last received, so maybe we can skip,
	# check if not fork or fork and lastparentgc < lastgc
	if [ -n "$isfork" ]; then
		if [ -n "$lastparentgcsecs" ] &&
			[ $lastparentgcsecs -lt $lastgcsecs ]; then
			# We've run gc since our parent ran gc so we can skip
			skipgc=1
		fi
	else
		# We don't have any alternates (we're not a fork) so we can skip
		skipgc=1
	fi
fi

# Prevent any other simultaneous gc operations
lock_gc

# At this point, if .allowgc exists, it's now crud to be removed
rm -f .allowgc

# Ideally we would do this in post-receive, but that would mean duplicating the
# logic so it's available in the chroot jail and that's highly undesirable
# Instead, since the first gc will be triggered immediately following the first
# push, we do the check here as it's quick and harmless if HEAD is already valid
check_and_set_head || :

# Always get rid of crud
remove_crud

# Run 'git svn gc' now for svn mirrors
if [ -n "$svn_mirror" ]; then
	git svn gc || :
fi

# Skip the actual gc if .delaygc is set
if [ -e .delaygc ]; then
	progress "x [$proj] garbage check delayed (except for crud removal)"
	rm -f "$lockf"
	exit 0
fi

# Do not skip gc if the repo is dirty
if [ -n "$skipgc" ] && ! is_dirty; then
	progress "= [$proj] garbage check nothing but crud removal to do (`date`)"
	config_set lastgc "$gcstart"
	rm -f "$lockf"
	exit 0
fi

bumptime=
if [ -n "$isfork" ] && [ -z "$lastparentgcsecs" ]; then
	# set lastparentgc and then update gcstart to be at least 1 second later
	config_set lastparentgc "$gcstart"
	bumptime=1
fi
if [ -z "$lastreceivesecs" ]; then
	# set lastreceive and then update gcstart to be at least 1 second later
	config_set lastreceive "$gcstart"
	bumptime=1
fi
if [ -n "$bumptime" ]; then
	sleep 1
	gcstart="$(date "$datefmt")"
fi

progress "+ [$proj] garbage check (`date`)"

newdeltas=
[ -z "$alwaysredelta" ] || newdeltas=-f
if [ -z "$newdeltas" ] && [ -n "$gfi_mirror" ]; then
	if [ $(list_packs --exclude-no-idx --count objects/pack) -le \
		$(list_packs --exclude-no-idx --count --quiet --only gfi-packs) ]; then
		# Don't bother with repack_gfi_packs since everything's being repacked
		newdeltas=-f
	fi
fi
if [ -z "$newdeltas" ] && [ -n "$noreusedeltaopt" ] &&
	[ $(list_packs --exclude-no-idx --count-objects objects/pack) -le $var_redelta_threshold ]; then
	# There aren't enough objects to worry about so just redelta to get the best pack
	newdeltas=-f
fi
if [ -z "$newdeltas" ]; then
	# Since we're not going to recompute deltas overall, we need to do the
	# "mini" maintenance so that we can get more optimal deltas
	[ -z "$noreusedeltaopt" ] || make_svn_pack
	repack_gfi_packs
	force_single_pack_redelta=
	[ -n "$gfi_mirror" ] || [ -n "$svn_mirror" ] || force_single_pack_redelta=1
	[ -z "$noreusedeltaopt" ] || combine_small_packs $force_single_pack_redelta
fi

## Safe Pruning In Forks
##
## We are about to perform garbage collection.  We do NOT use the "git gc"
## command directly as it does not provide enough control over the fine details
## that we require.  However, we DO maintain a "gc.pid" file during our garbage
## collection so that a simultaneous "git gc" by an administrator will be
## blocked (and similarly we refuse to start garbage collection if we cannot
## create the "gc.pid" file).  When we say "gc" in the below description we are
## referring to our "gc.sh" script, NOT the "git gc" command.
##
## If the project we are running garbage collection (gc) on has any forks we
## must be careful not to remove any objects that, while no longer referenced by
## this project (the parent), are still referenced by one or more forks (the
## children); otherwise the children will become corrupt and we can't abide
## corrupt children.
##
## One way to accomplish this is to simply hard-link all currently existing
## loose objects and packs in the parent into all the children that refer to the
## parent (via a line in their objects/info/alternates file) before beginning
## the gc operation and then relying on a subsequent gc in the child to clean up
## any excess objects/packs.  We used to use this strategy but it's very
## inefficient because:
##
##   1. The disk space used by the old pack(s)/object(s) will not be reclaimed
##      until all children (and their children, if any) run gc by which time
##      it's quite possible the topmost parent will have run gc again and
##      hard-linked yet another old pack down to its children (not to mention
##      loose objects).
##
##   2. As we are now using the "-A" option with "git repack", any new objects
##      in the parent that are not referenced by children will continually get
##      exploded out of the hard-linked pack in the children whenever the
##      children run gc.
##
##   3. To avoid suboptimal and/or unnecessarily many packs being hard-linked
##      into child forks, we must run the "mini" gc maintenance before we
##      perform the hard-linking into the children which provides yet another
##      source of inefficiency.
##
## Since we are using the "-A" option to "git repack" (that was not always the
## case) to guarantee we can access old ref values for long enough to send out
## a meaningful mail.sh notification, we now have another, more efficient,
## option available to prevent corruption of child forks that continue to refer
## to objects that are no longer reachable from any ref in the parent.
##
## The only things that need be copied (or hard-linked) into the child fork(s)
## are those objects that have become unreachable from any ref in the parent.
## They are the only things that could ever be removed by "git prune" and
## therefore the only things we need to prevent the loss of in order to avoid
## corruption of the child fork(s).
##
## Therefore we now use the following strategy instead to avoid excessive disk
## use and lots of unnecessary loose objects in child forks:
##
##   1. Run "git repack -A -d -l" in the parent BEFORE doing anything about
##      child forks.
##
##   2. Hard-link all remaining existing loose objects in the parent into the
##      immediate child forks.
##
##   3. Now run "git prune" in the parent.
##
## With this new strategy we avoid the need to run any "mini" gc maintenance
## before copying (or hard-linking) anything down to the child forks.
## Furthermore, only when the parent performs a non-fast-forward update will
## anything ever be transferred to the children, leaving them unperturbed in the
## vast majority of cases.  Finally, even if the parent references objects the
## children do not, those objects will no longer continually end up in the
## children as unreachable loose objects after the children run gc.

git pack-refs --all
touch .gc_in_progress
rm -f .gc_failed bundles/*
rm -f objects/pack/pack-*.bndl
# We use the -A option with git repack so that unreachable objects can live
# on for a time as loose objects.  This is particularly helpful if we just
# happen to be in the process of sending out a ref update for a ref that was
# force updated and the old ref value would have otherwise been removed by
# repack because it was now unreachable.  Admittedly the window for gc to run
# and do that before we manage to send out the ref update is not large, but
# it would not be difficult to create such a situation.  Unfortunately, when
# Git unpacks these unreachable objects it will give them the modification
# time of the *.pack file they came out of.  This could be very, very old.
# If that happens, the subsequent git prune --expire some_time_ago will still
# remove the object(s) and our pending ref update will still lose out.
# To prevent this from happening and to get the behavior we want, we now
# touch the modification time of all pack-<sha>.pack files so that any
# loosened objects get a current time.  Git does not provide any other
# mechanism to do this.  We do not want to just touch all loose objects
# left after the repack because that would cause objects that were loosened
# previously to live on which we definitely do not want.
list_packs --exclude-no-idx objects/pack | xargs touch -c 2>/dev/null || :
# We wish to keep deltas from our last full pack so if we're not redeltaing
# then make sure the .pack associated with the .bitmap has a newer mod time
# (If there is no .bitmap then touch the pack with the most objects instead.)
if [ -z "$newdeltas" ]; then
	bmpack="$(list_packs --exclude-no-bitmap --exclude-no-idx --max-matches 1 objects/pack)"
	[ -n "$bmpack" ] || bmpack="$(list_packs --exclude-no-idx --max-matches 1 --object-limit -1 --include-boundary objects/pack)"
	if [ -n "$bmpack" ] && [ -f "$bmpack" -a -s "$bmpack" ]; then
		sleep 1
		touch -c "$bmpack" 2>/dev/null || :
	fi
fi
# The git repack command only supports bitmaps if all objects are being packed.
# While it is theoretically possible that a project with a non-empty alternates
# file ends up packing all objects (because it does not actually use any of the
# objects found in the alternates), it's very unlikely.  And, in the unlikely
# event that did occur, clients would see a message about only using one bitmap
# because Git can only use one bitmap at a time and at least one of the
# alternates is bound to have a bitmap.  Therefore if we see a non-empty
# alternates file, we disable writing bitmaps which avoids the warning and any
# possibility of a client warning as well.
nobm=
[ -z "$var_have_git_172" ] || ! [ -s objects/info/alternates ] ||
	nobm='-c repack.writebitmaps=false -c pack.writebitmaps=false'
progress "~ [$proj] running full gc repack${nobm:+ (bitmaps disabled)}"
git $nobm repack $packopts -A -d -l $quiet $newdeltas "$@"
[ ! -e .gc_failed ] || exit 1
# These, if they exist, are now meaningless and need to be removed
rm -f gfi-packs .needsgc .svnpack .svnpackgc
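# Note: the unquoted $octet20 glob below expands to all of the repository's
# current pack files (or remains literal if there happen to be none).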
allpacks="$(echo objects/pack/pack-$octet20.pack)"
curhead="$(cat HEAD)"
pkrf=
[ ! -e packed-refs ] || pkrf=packed-refs
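# Sum the du -k sizes of packed-refs and all packs; e.g. sizes of 8 and 16
# KiB become the arithmetic expression $(( 0 + 8 + 16 )), i.e. 24.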
eval "reposizek=$(( $(echo 0 $(du -k $pkrf $allpacks 2>/dev/null | LC_ALL=C awk '{print $1}') |
	LC_ALL=C sed -e 's/ / + /g') ))"
git update-server-info
# The -A option to `git repack` may have caused some loose objects to pop
# out of their packs.  We must make these objects group writable so that they
# can be freshened by other pushers.  Technically we need only do this for
# push projects but to enable mirror projects to be more easily converted to
# push projects, we go ahead and do it for all projects.
{ find objects/$octet -type f -name "$octet19" -print0 | xargs -0 chmod ug+w || :; } 2>/dev/null

if has_forks "$proj"; then
	progress "~ [$proj] hard-linking loose objects into immediate child forks"
	# We have to update the lastparentgc time in the child forks even if they do not get any
	# new "loose objects" because they need to run gc just in case the parent now has some
	# objects that used to only be in the child so they can be removed from the child.
	# For example, a "patch" might be developed first in a fork and then later accepted into
	# the parent in which case the objects making up the patch in the child fork are now
	# redundant (since they're now in the parent as well) and need to be removed from the
	# child fork which can only happen if the child fork runs gc.
	shbin="${cfg_posix_sh_bin:-/bin/sh}"
	forkdir="$proj"
	# It is enough to copy objects just one level down and get_repo_list
	# takes a regular expression (which is automatically prefixed with '^')
	# so we can easily match forks exactly one level down from this project
	get_repo_list "$forkdir/[^/]*:" |
	while read fork; do
		# Ignore forks that do not exist or are symbolic links
		[ ! -L "$cfg_reporoot/$fork.git" -a -d "$cfg_reporoot/$fork.git" ] ||
			continue
		# Or do not have a non-zero length alternates file
		[ -s "$cfg_reporoot/$fork.git/objects/info/alternates" ] ||
			continue
		# Match objects in parent project
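		# (objects/$octet is an unquoted glob matching the two-hex-digit
		# fan-out dirs; if nothing matches it stays literal and the guard
		# below skips it)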
		for d in objects/$octet; do
			[ "$d" != "objects/$octet" ] || continue
			mkdir -p "$cfg_reporoot/$fork.git/$d"
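			# xargs feeds the loose object files to a one-line sh script
			# as "$@", hard-linking them into the fork's object fan-out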
			find "$d" -maxdepth 1 -type f -name "$octet19" -print0 |
				xargs -0 "$shbin" -c 'ln -f "$@" '"'$cfg_reporoot/$fork.git/$d/'" sh || :
		done
		# Update the fork's lastparentgc date (must be current, not $gcstart)
		git --git-dir="$cfg_reporoot/$fork.git" config \
			gitweb.lastparentgc "$(date "$datefmt")"
	done
fi

# The git prune command does not take a -q or --quiet but started outputting
# 'Checking connectivity' progress messages in v1.7.9.  However, we can
# suppress those by piping through cat as it only activates the progress
# messages when stderr is a tty.  We only expire loose objects older than one
# day just in case there's some pending action (such as sending out a ref
# update) in progress that might want to examine them.  This may leave us with
# loose objects.  That's okay because at the next gc interval, we will always
# run gc if we see any loose objects regardless of whether or not we've seen
# any updates or we've received new linked objects from our parent.  Note that
# in order to keep loose objects that just recently became unreferenced but
# have a very old modification date around we rely on some help from both the
# update.sh and hooks/pre-receive scripts.  Furthermore, since Git v2.2.0
# (d3038d22 prune: keep objects reachable from recent objects) an unreachable
# object that would otherwise be pruned (because it's too old) will be kept
# alive by an unreachable object that refers to it that's not old enough to
# be pruned yet.
prunecmd='git prune --expire 1_day_ago'
[ -n "$show_progress" ] ||
	prunecmd="{ $prunecmd 2>&1 || touch .gc_failed; } | cat"
progress "~ [$proj] pruning expired unreachable loose objects"
eval "$prunecmd"
[ ! -e .gc_failed ] || exit 1

# darcs:// mirrors have a xxx.log file that will grow endlessly
# if this is a mirror and the file exists, shorten it to 10000 lines
# also take this opportunity to optimize the darcs repo
if [ ! -e .nofetch ] && [ -n "$cfg_mirror" ]; then
	url="$(config_get baseurl || :)"
	case "$url" in darcs://*)
		if [ -n "$cfg_mirror_darcs" ]; then
			url="${url%/}"
			basedarcs="$(basename "${url#darcs:/}")"
			if [ -f "$basedarcs.log" ]; then
				tail -n 10000 "$basedarcs.log" > "$basedarcs.log.$$"
				mv -f "$basedarcs.log.$$" "$basedarcs.log"
			fi
			if [ -d "$basedarcs.darcs" ]; then
				(
					cd "$basedarcs.darcs"
					# without show_progress suppress non-error output
					[ -n "$show_progress" ] || exec >/dev/null
					# Note that this does not optimize _darcs/inventories/ :(
					darcs optimize || :
				)
			fi
		fi
	esac
fi

# Create a matching .bndl header file for the all-in-one pack we just created
# but only if we're not a fork (otherwise the bundle would not be complete)
# and we are running at least Git version 1.7.2 (pack_is_complete always fails otherwise)
if [ ! -s objects/info/alternates ] && [ -n "$var_have_git_172" ]; then
	# There should only be one pack in $allpacks but if there was a
	# simultaneous push...
	# The one we just created will have a .idx and will NOT have a .keep
	progress "~ [$proj] creating downloadable bundle header"
	pkfound=
	pkhead=
	for pk in $allpacks; do
		[ -s "$pk" ] || continue
		pkbase="${pk%.pack}"
		[ -s "$pkbase.idx" ] || continue
		[ ! -e "$pkbase.keep" ] || continue
		if pkhead="$(pack_is_complete "$PWD/$pk" "$PWD/packed-refs" "$curhead")"; then
			pkfound="$pkbase"
			break
		fi
	done
	if [ -n "$pkfound" -a -n "$pkhead" ]; then
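		# A v2 bundle header is the "# v2 git bundle" magic line, one
		# "<sha> <refname>" line per ref, a "<sha> HEAD" line and then a
		# blank line terminator; the object data stays in the .pack file.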
		{
			echo "# v2 git bundle"
			LC_ALL=C sed -ne "/^$octet20 refs\/[^ $tab]*\$/ p" < packed-refs
			echo "$pkhead HEAD"
			echo ""
		} > "$pkbase.bndl"
		bndletag="$("$cfg_basedir/bin/rangecgi" --etag -m 1 "$pkbase.bndl" "$pkbase.pack" || :)"
		bndlsha="$(printf '%s' "$bndletag" | git hash-object --stdin || :)"
		if [ -n "$bndletag" ]; then
			case "$bndlsha" in $octet20)
				bndlshatrailer="${bndlsha#????????}"
				bndlshaprefix="${bndlsha%$bndlshatrailer}"
				bndlname="$(TZ=UTC date +%Y%m%d_%H%M%S)-${bndlshaprefix:-0}"
				[ -d bundles ] || mkdir bundles
				echo "${pkbase#objects/pack/}.bndl" > "bundles/$bndlname"
				echo "${pkbase#objects/pack/}.pack" >> "bundles/$bndlname"
				ln -s -f -n "$bndlname" bundles/latest
			esac
		fi
	fi
fi

# Record the size of this repo as the sum of its *.pack sizes as 1024-byte blocks
config_set_raw girocco.reposizek "${reposizek:-0}"

# We use $gcstart here to avoid a race where a push occurs during the gc itself
# and the next future gc could be incorrectly skipped if we used the current
# timestamp here instead
config_set lastgc "$gcstart"
rm -f "$lockf"

progress "- [$proj] garbage check (`date`)"