#!/bin/sh

# NOTE: additional options can be passed to git repack by specifying
# them after the project name, for example:
#   gc.sh my-project -f

. @basedir@/shlib.sh

set -e

if [ $# -lt 1 ]; then
	echo "Usage: gc.sh projname [extra-repack-args]" >&2
	exit 1
fi

# packing options
packopts="--depth=50 --window=50 --window-memory=${var_window_memory:-1g}"
quiet=; [ -n "$show_progress" ] || quiet=-q

umask 002
[ "$cfg_permission_control" != "Hooks" ] || umask 000

pidactive() {
	if _result="$(kill -0 "$1" 2>&1)"; then
		# process exists and we have permission to signal it
		return 0
	fi
	case "$_result" in *"not permitted"*)
		# we do not have permission to signal the process
		return 0
	esac
	# process does not exist
	return 1
}
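
# Usage sketch (illustrative only, not part of the original flow):
#   if pidactive "$somepid"; then
#       echo "pid $somepid is (or at least may be) still alive"
#   fi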

createlock() {
	# A .lock file should only exist for much less than a second.
	# If we see a stale lock file (> 1h old), remove it and then,
	# just in case, wait 30 seconds for any process whose .lock
	# we might have just removed (it's racy) to finish doing what
	# should take much less than a second to do.
	_stalelock="$(find "$1.lock" -maxdepth 1 -mmin +60 -print 2>/dev/null || :)"
	if [ -n "$_stalelock" ]; then
		rm -f "$_stalelock"
		sleep 30
	fi
	for _try in p p n; do
		if (set -C; > "$1.lock") 2>/dev/null; then
			echo "$1.lock"
			return 0
		fi
		# delay and try again
		[ "$_try" != "p" ] || sleep 1
	done
	# cannot create lock file
	return 1
}
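
# Hypothetical usage sketch (the file names here are made up):
#   lockfile="$(createlock somefile)" || exit 1
#   ...briefly do something with somefile...
#   rm -f "$lockfile"
# The (set -C; > "$1.lock") idiom relies on the shell's noclobber option to
# make lock creation atomic: the redirection fails if the file already exists.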

# return true if there's more than one objects/pack-<sha>.pack file or
# ANY sha-1 files in objects
is_dirty() {
	_packs=$(find objects/pack -type f -name "pack-$octet20.pack" -print | head -n 2 | LC_ALL=C wc -l)
	if [ $_packs != 1 ] && [ $_packs != 0 ]; then
		return 0
	fi
	_objs=$(find objects/$octet -type f -name "$octet19" -print 2>/dev/null | head -n 1 | LC_ALL=C wc -l)
	[ $_objs -ne 0 ]
}

# make sure combine-packs uses the correct Git executable
run_combine_packs() {
	PATH="$var_git_exec_path:$cfg_basedir/bin:$PATH" @basedir@/jobd/combine-packs.sh "$@"
}

# combine the input pack(s) into a new pack (or possibly packs if packSizeLimit set)
# input pack names are read from standard input one per line delimited by the first
# ':', ' ' or '\n' character on the line (which allows gfi-packs to be read directly)
# all arguments, if any, are passed to pack-objects as additional options
# returns non-zero on failure AND creates .gc_failed in that case
combine_packs() {
	rm -f .gc_failed
	find objects/pack -maxdepth 1 -type f -name '*.zap*' -print0 | xargs -0 rm -f
	run_combine_packs --replace "$@" $packopts --all-progress-implied $quiet --non-empty || {
		>.gc_failed
		return 1
	}
	return 0
}
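
# Example invocation (a sketch; the pack names are placeholders):
#   printf '%s\n' objects/pack/pack-<sha1a>.pack objects/pack/pack-<sha1b>.pack |
#   combine_packs --names --no-reuse-delta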

# if the current directory is_gfi_mirror then repack all packs listed in gfi-packs
repack_gfi_packs() {
	[ -n "$gfi_mirror" ] || return 0
	[ -d objects/pack ] || { rm -f gfi-packs; return 0; }
	progress "~ [$proj] redeltifying poor quality git fast-import packs"
	combine_packs --ignore-missing --no-reuse-delta <gfi-packs
	rm -f gfi-packs
	return 0
}

# combine small packs into larger pack(s)
# we avoid any keep, bndl or bitmap packs
# if the optional argument is non-empty even a single small pack will be redeltad
combine_small_packs() {
	_didprogress=
	_minsmallpacks=2
	if [ -n "$1" ] && [ -n "$noreusedeltaopt" ]; then
		_minsmallpacks=1
	fi
	_lpo="--exclude-no-idx --exclude-keep --exclude-bitmap --exclude-bndl --quiet"
	_lpo="$_lpo --object-limit $var_redelta_threshold objects/pack"
	while
		_cnt="$(list_packs --count $_lpo || :)"
		test "${_cnt:-0}" -ge $_minsmallpacks
	do
		[ -n "$_didprogress" ] || {
			progress "~ [$proj] combining small packs into a single larger pack"
			_didprogress=1
		}
		_newp="$(list_packs $_lpo | combine_packs --names $noreusedeltaopt)"
		_newc="$(echo $(echo "$_newp" | LC_ALL=C wc -w))"
		# be paranoid and exit the loop if we haven't reduced the number of packs
		[ $_newc -lt $_cnt ] || break
		_minsmallpacks=2
	done
	return 0
}

# Unfortunately git-svn lacks the ability to store newly fetched revisions as a pack.
# However, the fetch code conveniently sets .svnpack just before it runs git-svn fetch
# so that it's easy to find all the objects that have been fetched by git-svn and
# combine them into a pack.  The --no-reuse-delta option is meaningless here since
# everything to be packed is a loose object and therefore not a delta so deltification
# will always take place.
make_svn_pack() {
	[ -f .svnpack ] && [ -n "$svn_mirror" ] || return 0
	rm -f .svnpackgc
	mv -f .svnpack .svnpackgc
	progress "~ [$proj] combining loose git-svn objects into a pack"
	_newp="$(find objects/$octet -maxdepth 1 -type f -newer .svnpackgc -name "$octet19" -print 2>/dev/null |
		LC_ALL=C awk -F / '{print $2 $3}' |
		run_combine_packs --objects --names $packopts --incremental --all-progress-implied $quiet --non-empty)" || {
		mv -f .svnpackgc .svnpack
		>.gc_failed
		return 1
	}
	if [ -n "$_newp" ]; then
		# remove the now-redundant loose objects -- this is always safe
		# even during a concurrent push because a reprepare_packed_git
		# will be triggered if an object that should be there is not
		# found thereby finding it in the new pack instead
		git prune-packed $quiet
	fi
	rm -f .svnpackgc
}

# HEADSHA="$(pack_is_complete /full/path/to/some.pack /full/path/to/packed-refs "$(cat HEAD)")"
pack_is_complete() {
	# Must have a matching .idx file and a non-empty packed-refs file
	[ -s "${1%.pack}.idx" ] || return 1
	[ -s "$2" ] || return 1
	_headsha=
	case "$3" in
		$octet20)
			_headsha="$3"
			;;
		"ref: refs/"?*|"ref:refs/"?*|"refs/"?*)
			_headmatch="${3#ref:}"
			_headmatch="${_headmatch# }"
			_headmatchpat="$(echo "$_headmatch" | LC_ALL=C sed -e 's/\([.$]\)/\\\1/g')"
			_headsha="$(LC_ALL=C grep -e "^$octet20 $_headmatchpat\$" < "$2" | \
				LC_ALL=C cut -d ' ' -f 1)"
			case "$_headsha" in $octet20) :;; *)
				return 1
			esac
			;;
		*)
			# bad HEAD
			return 1
	esac
	rm -rf pack_is_complete_test
	mkdir pack_is_complete_test
	mkdir pack_is_complete_test/refs
	mkdir pack_is_complete_test/objects
	mkdir pack_is_complete_test/objects/pack
	echo "$_headsha" > pack_is_complete_test/HEAD
	ln -s "$1" pack_is_complete_test/objects/pack/
	ln -s "${1%.pack}.idx" pack_is_complete_test/objects/pack/
	ln -s "$2" pack_is_complete_test/packed-refs
	_count="$(git --git-dir=pack_is_complete_test rev-list --count --all 2>/dev/null || :)"
	rm -rf pack_is_complete_test
	[ -n "$_count" ] || return 1
	[ "$_count" -gt 0 ] 2>/dev/null || return 1
	echo "$_headsha"
}

# On return a "$lockf" will have been created that must be removed when gc is done
lock_gc() {
	# be compatible with gc.pid file from newer Git releases
	lockf=gc.pid
	hn="$(hostname)"
	active=
	if [ "$(createlock "$lockf")" ]; then
		# If $lockf is:
		# 1) less than 12 hours old
		# 2) contains two fields (pid hostname) NO trailing NL
		# 3) the hostname is different OR the pid is still alive
		# then we exit as another active process is holding the lock
		if [ "$(find "$lockf" -maxdepth 1 -mmin -720 -print 2>/dev/null)" ]; then
			apid=
			ahost=
			read -r apid ahost ajunk < "$lockf" || :
			if [ "$apid" ] && [ "$ahost" ]; then
				if [ "$ahost" != "$hn" ] || pidactive "$apid"; then
					active=1
				fi
			fi
		fi
	else
		echo >&2 "[$proj] unable to create gc.pid.lock file"
		exit 1
	fi
	if [ -n "$active" ]; then
		rm -f "$lockf.lock"
		echo >&2 "[$proj] gc already running on machine '$ahost' pid '$apid'"
		exit 1
	fi
	printf "%s %s" "$$" "$hn" > "$lockf.lock"
	chmod 0664 "$lockf.lock"
	mv -f "$lockf.lock" "$lockf"
}
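
# (Writing to "$lockf.lock" first and then renaming it over "$lockf" makes the
# lock contents appear atomically; readers never observe a partial write.)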

# Remove any crud that's been left behind by interrupted operations
# that did not clean up after themselves
remove_crud() {
	# Remove any existing FETCH_HEAD
	# There can only be a FETCH_HEAD if we've been fetching, not if we've been
	# receiving pushes (those never create a FETCH_HEAD).
	# And if we're fetching because we're a mirror, we know we're not fetching right
	# now since jobd.pl never runs a project's fetch simultaneously with its gc.
	# Therefore any existing FETCH_HEAD is junk.  And it may be many megabytes if
	# there were a lot of refs.
	rm -f FETCH_HEAD

	# Remove any stale pack remnants that are more than an hour old.
	# Stale pack fragments are defined as any pack-<sha1>.ext where .ext is NOT
	# .pack AND the corresponding .pack DOES NOT exist.  A bunch of stale
	# pack-<sha1>.idx files without their corresponding .pack files are worthless
	# and just waste space.  Normally there shouldn't be any remnants but actually
	# this can happen when things are interrupted at just the wrong time.
	# Note that the objects/pack directory is created by git init and should
	# always exist.
	find objects/pack -maxdepth 1 -type f -mmin +60 -name "pack-$octet20.?*" -print | \
		LC_ALL=C sed -e 's/^objects\/pack\/pack-//; s/\..*$//' | LC_ALL=C sort -u | \
		while read packsha; do
			[ ! -e "objects/pack/pack-$packsha.pack" ] || continue
			rm -f "objects/pack/pack-$packsha".?*
		done

	# Remove any stale pack .keep files that are more than 12 hours old.
	# We don't do anything to create any permanent pack .keep files, so they must
	# be remnants from some failed push or something.  Removing the .keep will
	# allow the pack to be properly repacked.
	find objects/pack -maxdepth 1 -type f -mmin +720 -name "pack-$octet20.keep" -print0 | xargs -0 rm -f

	# Remove any stale tmp_pack_*, tmp_idx_*, tmp_bitmap_*, packtmp-* or .tmp-*-pack files
	# that are more than 12 hours old.
	find objects/pack -maxdepth 1 -type f -mmin +720 \( \
		-name "tmp_pack_?*" -o -name "tmp_idx_?*" -o -name "tmp_bitmap_?*" -o \
		-name "packtmp-?*" -o -name ".tmp-?*-pack" \
	\) -print0 | xargs -0 rm -f

	# Remove any stale shallow_* files that are more than 12 hours old.
	# These can be left behind by Git >= 1.8.4.2 and < 2.0.0 when a client
	# requests a shallow clone.
	find . -maxdepth 1 -type f -mmin +720 -name "shallow_?*" -print0 | xargs -0 rm -f

	# Remove any stale *.temp files in the objects area that are more than 12 hours old.
	# This can be stale sha1.temp, or stale *.pack.temp so we kill all stale *.temp.
	find objects -type f -mmin +720 -name "*.temp" -print0 | xargs -0 rm -f

	# Remove any stale *.lock files in the htmlcache area that might have been left
	# behind after an abnormal exit during an attempt to update a cached file and
	# are more than 1 hour old.
	! [ -d htmlcache ] || find htmlcache -type f -mmin +60 -name "*.lock" -print0 | xargs -0 rm -f

	# Remove any stale git-svn temp files that are more than 12 hours old.
	# The git-svn process creates temp files with random 10 character names
	# in the root of $GIT_DIR.  Unfortunately they do not have a recognizable
	# prefix, so we just have to kill any files with a 10-character name.  We
	# do this only for git-svn mirrors.  All characters are chosen from
	# [A-Za-z0-9_] so we can at least check that and fortunately the only
	# collision is 'FETCH_HEAD' but that shouldn't matter.
	# There may also be temp files with a Git_ prefix as well.
	if [ -n "$svn_mirror" ]; then
		_randchar='[A-Za-z0-9_]'
		_randchar2="$_randchar$_randchar"
		_randchar4="$_randchar2$_randchar2"
		_randchar10="$_randchar4$_randchar4$_randchar2"
		find . -maxdepth 1 -type f -mmin +720 -name "$_randchar10" -print0 | xargs -0 rm -f
		find . -maxdepth 1 -type f -mmin +720 -name "Git_*" -print0 | xargs -0 rm -f
	fi

	# Remove any stale fast_import_crash_<pid> files that are more than 3 days old.
	if [ -n "$gfi_mirror" ]; then
		find . -maxdepth 1 -type f -mmin +4320 -name "fast_import_crash_?*" -print0 | xargs -0 rm -f
	fi
}

## Garbage Collection Types
##
## There are two kinds of possible garbage collection (gc) operations:
##
##   1. A normal, full gc
##   2. A "mini" gc
##
## If the full garbage collection interval has expired (or gc has never been
## run), then a normal, full gc will take place.  Otherwise, a "mini" gc will
## take place if the file .needsgc exists.
##
## A "mini" gc is similar to "git gc --auto" in that it may not end up actually
## doing anything unless the right conditions are present so it's not a burden
## to run it often.  If the file .needsgc exists, a "mini" gc will occur at
## the next opportunity.
##
## Note, however, that the .nogc file suppresses ALL gc activity (normal or mini).
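##
## In outline (an informal sketch of the decision logic that follows below):
##
##   last gc too recent and no .needsgc  => skip entirely
##   .nogc exists                        => skip entirely
##   last gc too recent and .needsgc     => perform a "mini" gc
##   otherwise                           => perform a full gc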

proj="${1%.git}"
shift
cd "$cfg_reporoot/$proj.git"
[ -d objects/pack ] || { rm -f gfi-packs; mkdir -p objects/pack; }
mirror_url="$(get_mirror_url)"
svn_mirror=
! is_svn_mirror_url "$mirror_url" || svn_mirror=1
gfi_mirror=
if [ -f gfi-packs -a -s gfi-packs ] && is_gfi_mirror_url "$mirror_url"; then
	gfi_mirror=1
fi

# If git config --bool --get girocco.redelta is explicitly false then automatic
# redelta when there are fewer than $var_redelta_threshold objects will be suppressed.
# On the other hand, if git config --get girocco.redelta is "always" then, on a full
# gc only, for the final repack, deltas will always be recomputed.
# This can be set on a per-project basis to avoid unusual pathological gc behavior.
# Setting this will hurt efficiency of the affected repository.
# Note that fast-import packs ALWAYS get new deltas regardless of this setting.
noreusedeltaopt="--no-reuse-delta"
[ "$(git config --bool --get girocco.redelta 2>/dev/null || :)" != "false" ] || noreusedeltaopt=
alwaysredelta=
[ "$(git config --get girocco.redelta 2>/dev/null || :)" != "always" ] || alwaysredelta=1

trap 'e=$?; rm -f .gc_in_progress; if [ $e != 0 ]; then echo "gc failed dir: $PWD" >&2; fi' EXIT
trap 'exit 130' INT
trap 'exit 143' TERM

# date -R is linux-only, POSIX equivalent is '+%a, %d %b %Y %T %z'
datefmt='+%a, %d %b %Y %T %z'
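# e.g. "$(date "$datefmt")" produces something like "Mon, 05 Mar 2018 14:02:33 +0000"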

isminigc=
if check_interval lastgc $cfg_min_gc_interval; then
	if [ -e .needsgc ]; then
		isminigc=1
	else
		progress "= [$proj] garbage check skip (last at $(config_get lastgc))"
		exit 0
	fi
fi
if [ -e .nogc ]; then
	progress "x [$proj] garbage check disabled"
	exit 0
fi
374 if [ -n "$isminigc" ]; then
375 # Perform a "mini" gc
376 # Note that .delaygc is ignored here as that's only intended for full gc
377 lock_gc
378 rm -f .allowgc .needsgc
379 remove_crud
380 miniactive=
381 if [ -f .svnpack ] && [ -n "$svn_mirror" ]; then
382 miniactive=1
383 progress "+ [$proj] mini garbage check (`date`)"
384 make_svn_pack
386 if [ -z "$cfg_delay_gfi_redelta" ] && [ -n "$gfi_mirror" ]; then
387 # $Girocco::Config::delay_gfi_redelta is false, force redeltification now
388 if [ -z "$miniactive" ]; then
389 miniactive=1
390 progress "+ [$proj] mini garbage check (`date`)"
392 repack_gfi_packs
394 # If there aren't at least 10 non-keep, non-bitmap, non-bndl packs then
395 # don't actually process them yet
396 lpo="--exclude-no-idx --exclude-keep --exclude-bitmap --exclude-bndl --quiet"
397 packcnt="$(list_packs --count $lpo objects/pack || :)"
398 if [ "${packcnt:-0}" -ge 10 ]; then
399 if [ -z "$miniactive" ]; then
400 miniactive=1
401 progress "+ [$proj] mini garbage check (`date`)"
403 if [ -n "$gfi_mirror" ]; then
404 repack_gfi_packs
405 packcnt="$(list_packs --count $lpo objects/pack || :)"
407 # if repack_gfi_packs dropped the pack count to < 10 don't combine
408 if [ "${packcnt:-0}" -ge 10 ]; then
409 combine_small_packs
410 packcnt="$(list_packs --count $lpo objects/pack || :)"
412 # if we still have more than 10 packs trigger a full gc
413 if [ "${packcnt:-0}" -ge 10 ]; then
414 # We shouldn't be in a .delaygc state at this point, but if
415 # we are then nuke it because we really need a full gc now
416 rm -f .delaygc
417 git config --unset gitweb.lastgc
418 rm -f "$lockf"
419 progress "- [$proj] mini garbage check triggering full gc too many packs (`date`)"
420 exit 0
423 rm -f "$lockf"
424 if [ -n "$miniactive" ]; then
425 git update-server-info
426 progress "- [$proj] mini garbage check (`date`)"
427 else
428 progress "= [$proj] mini garbage check nothing but crud removal to do (`date`)"
430 exit 0

# Avoid unnecessary garbage collections:
#   1. If lastreceive is set and is older than lastgc
#      -AND-
#   2. We are not a fork (! -s alternates) -OR- lastparentgc is older than lastgc
#
# If lastgc is NOT set or lastreceive is NOT set we MUST run gc
# If we are a fork and lastparentgc is NOT set we MUST run gc
#
# If the repo is dirty after removing any crud we MUST run gc

gcstart="$(date "$datefmt")"
skipgc=
isfork=
[ -s objects/info/alternates ] && isfork=1
lastparentgcsecs=
[ -n "$isfork" ] && lastparentgcsecs="$(config_get_date_seconds lastparentgc || :)"
lastreceivesecs=
if lastreceivesecs="$(config_get_date_seconds lastreceive)" && \
	lastgcsecs="$(config_get_date_seconds lastgc)" && \
	[ $lastreceivesecs -lt $lastgcsecs ]; then
	# We've run gc since we last received, so maybe we can skip,
	# check if not fork or fork and lastparentgc < lastgc
	if [ -n "$isfork" ]; then
		if [ -n "$lastparentgcsecs" ] && \
			[ $lastparentgcsecs -lt $lastgcsecs ]; then
			# We've run gc since our parent ran gc so we can skip
			skipgc=1
		fi
	else
		# We don't have any alternates (we're not a fork) so we can skip
		skipgc=1
	fi
fi

# Prevent any other simultaneous gc operations
lock_gc

# At this point, if .allowgc exists, it's now crud to be removed
rm -f .allowgc

# Ideally we would do this in post-receive, but that would mean duplicating the
# logic so it's available in the chroot jail and that's highly undesirable
# Instead, since the first gc will be triggered immediately following the first
# push, we do the check here as it's quick and harmless if HEAD is already valid
check_and_set_head || :

# Always get rid of crud
remove_crud

# Run 'git svn gc' now for svn mirrors
if [ -n "$svn_mirror" ]; then
	git svn gc || :
fi

# Skip the actual gc if .delaygc is set
if [ -e .delaygc ]; then
	progress "x [$proj] garbage check delayed (except for crud removal)"
	rm -f "$lockf"
	exit 0
fi

# Do not skip gc if the repo is dirty
if [ -n "$skipgc" ] && ! is_dirty; then
	progress "= [$proj] garbage check nothing but crud removal to do (`date`)"
	config_set lastgc "$gcstart"
	rm -f "$lockf"
	exit 0
fi

bumptime=
if [ -n "$isfork" ] && [ -z "$lastparentgcsecs" ]; then
	# set lastparentgc and then update gcstart to be at least 1 second later
	config_set lastparentgc "$gcstart"
	bumptime=1
fi
if [ -z "$lastreceivesecs" ]; then
	# set lastreceive and then update gcstart to be at least 1 second later
	config_set lastreceive "$gcstart"
	bumptime=1
fi
if [ -n "$bumptime" ]; then
	sleep 1
	gcstart="$(date "$datefmt")"
fi

progress "+ [$proj] garbage check (`date`)"

newdeltas=
[ -z "$alwaysredelta" ] || newdeltas=-f
if [ -z "$newdeltas" ] && [ -n "$gfi_mirror" ]; then
	if [ $(list_packs --exclude-no-idx --count objects/pack) -le \
		$(list_packs --exclude-no-idx --count --quiet --only gfi-packs) ]; then
		# Don't bother with repack_gfi_packs since everything's being repacked
		newdeltas=-f
	fi
fi
if [ -z "$newdeltas" ] && [ -n "$noreusedeltaopt" ] && \
	[ $(list_packs --exclude-no-idx --count-objects objects/pack) -le $var_redelta_threshold ]; then
	# There aren't enough objects to worry about so just redelta to get the best pack
	newdeltas=-f
fi
if [ -z "$newdeltas" ]; then
	# Since we're not going to recompute deltas overall, we need to do the
	# "mini" maintenance so that we can get more optimal deltas
	[ -z "$noreusedeltaopt" ] || make_svn_pack
	repack_gfi_packs
	force_single_pack_redelta=
	[ -n "$gfi_mirror" ] || [ -n "$svn_mirror" ] || force_single_pack_redelta=1
	[ -z "$noreusedeltaopt" ] || combine_small_packs $force_single_pack_redelta
fi

## Safe Pruning In Forks
##
## We are about to perform garbage collection.  We do NOT use the "git gc"
## command directly as it does not provide enough control over the fine details
## that we require.  However, we DO maintain a "gc.pid" file during our garbage
## collection so that a simultaneous "git gc" by an administrator will be
## blocked (and similarly we refuse to start garbage collection if we cannot
## create the "gc.pid" file).  When we say "gc" in the below description we are
## referring to our "gc.sh" script, NOT the "git gc" command.
##
## If the project we are running garbage collection (gc) on has any forks we
## must be careful not to remove any objects that while no longer referenced by
## this project (the parent) are still referenced by one or more forks (the
## children) otherwise the children will become corrupt and we can't abide
## corrupt children.
##
## One way to accomplish this is to simply hard-link all currently existing
## loose objects and packs in the parent into all the children that refer to the
## parent (via a line in their objects/info/alternates file) before beginning
## the gc operation and then relying on a subsequent gc in the child to clean up
## any excess objects/packs.  We used to use this strategy but it's very
## inefficient because:
##
##   1. The disk space used by the old pack(s)/object(s) will not be reclaimed
##      until all children (and their children, if any) run gc by which time
##      it's quite possible the topmost parent will have run gc again and
##      hard-linked yet another old pack down to its children (not to mention
##      loose objects).
##
##   2. As we are now using the "-A" option with "git repack", any new objects
##      in the parent that are not referenced by children will continually get
##      exploded out of the hard-linked pack in the children whenever the
##      children run gc.
##
##   3. To avoid suboptimal and/or unnecessarily many packs being hard-linked
##      into child forks, we must run the "mini" gc maintenance before we
##      perform the hard-linking into the children which provides yet another
##      source of inefficiency.
##
## Since we are using the "-A" option to "git repack" (that was not always the
## case) to guarantee we can access old ref values for long enough to send out
## a meaningful mail.sh notification, we now have another, more efficient,
## option available to prevent corruption of child forks that continue to refer
## to objects that are no longer reachable from any ref in the parent.
##
## The only things that need be copied (or hard-linked) into the child fork(s)
## are those objects that have become unreachable from any ref in the parent.
## They are the only things that could ever be removed by "git prune" and
## therefore the only things we need to prevent the loss of in order to avoid
## corruption of the child fork(s).
##
## Therefore we now use the following strategy instead to avoid excessive disk
## use and lots of unnecessary loose objects in child forks:
##
##   1. Run "git repack -A -d -l" in the parent BEFORE doing anything about
##      child forks.
##
##   2. Hard-link all remaining existing loose objects in the parent into the
##      immediate child forks.
##
##   3. Now run "git prune" in the parent.
##
## With this new strategy we avoid the need to run any "mini" gc maintenance
## before copying (or hard-linking) anything down to the child forks.
## Furthermore, only when the parent performs a non-fast-forward update will
## anything ever be transferred to the children leaving them unperturbed in the
## vast majority of cases.  Finally, even if the parent references objects the
## children do not, those objects will no longer continually end up in the
## children as unreachable loose objects after the children run gc.
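##
## The three numbered steps above correspond, in order, to the "git repack"
## invocation, the has_forks hard-linking loop, and the "git prune" command
## that follow below.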

git pack-refs --all
touch .gc_in_progress
rm -f .gc_failed bundles/*
rm -f objects/pack/pack-*.bndl
# We use the -A option with git repack so that unreachable objects can live
# on for a time as loose objects.  This is particularly helpful if we just
# happen to be in the process of sending out a ref update for a ref that was
# force updated and the old ref value would have otherwise been removed by
# repack because it was now unreachable.  Admittedly the window for gc to run
# and do that before we manage to send out the ref update is not large, but
# it would not be difficult to create such a situation.  Unfortunately, when
# Git unpacks these unreachable objects it will give them the modification
# time of the *.pack file they came out of.  This could be very, very old.
# If that happens, the subsequent git prune --expire some_time_ago will still
# remove the object(s) and our pending ref update will still lose out.
# To prevent this from happening and to get the behavior we want, we now
# touch the modification time of all pack-<sha>.pack files so that any
# loosened objects get a current time.  Git does not provide any other
# mechanism to do this.  We do not want to just touch all loose objects
# left after the repack because that would cause objects that were loosened
# previously to live on which we definitely do not want.
list_packs --exclude-no-idx objects/pack | xargs touch -c 2>/dev/null || :
# We wish to keep deltas from our last full pack so if we're not redeltaing
# then make sure the .pack associated with the .bitmap has a newer mod time
# (If there is no .bitmap then touch the pack with the most objects instead.)
if [ -z "$newdeltas" ]; then
	bmpack="$(list_packs --exclude-no-bitmap --exclude-no-idx --max-matches 1 objects/pack)"
	[ -n "$bmpack" ] || bmpack="$(list_packs --exclude-no-idx --max-matches 1 --object-limit -1 --include-boundary objects/pack)"
	if [ -n "$bmpack" ] && [ -f "$bmpack" -a -s "$bmpack" ]; then
		sleep 1
		touch -c "$bmpack" 2>/dev/null || :
	fi
fi
# The git repack command may issue a 'disabling bitmap' warning for some
# repositories.  This is perfectly normal and should be suppressed unless
# show_progress is set.  Unfortunately that means we have to grep -v the
# output.  And furthermore, since it's a translated message, we have to
# force the language to English to be sure we do it.
repackcmd="git repack $packopts -A -d -l $quiet $newdeltas $@"
[ -n "$show_progress" ] || \
	repackcmd="{ LC_ALL=C $repackcmd 2>&1 || touch .gc_failed; } | LC_ALL=C grep -v 'disabling bitmap' || :"
progress "~ [$proj] running full gc repack"
eval "$repackcmd"
[ ! -e .gc_failed ] || exit 1
# These, if they exist, are now meaningless and need to be removed
rm -f gfi-packs .needsgc .svnpack .svnpackgc
allpacks="$(echo objects/pack/pack-$octet20.pack)"
curhead="$(cat HEAD)"
pkrf=
[ ! -e packed-refs ] || pkrf=packed-refs
eval "reposizek=$(( $(echo 0 $(du -k $pkrf $allpacks 2>/dev/null | LC_ALL=C awk '{print $1}') | \
	LC_ALL=C sed -e 's/ / + /g') ))"
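# (The line above sums the 1024-byte du counts by turning, e.g., "0 37 2113"
# into the arithmetic expression "0 + 37 + 2113" and letting the shell's
# $(( )) arithmetic evaluate it.)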
git update-server-info
# The -A option to `git repack` may have caused some loose objects to pop
# out of their packs.  We must make these objects group writable so that they
# can be freshened by other pushers.  Technically we need only do this for
# push projects but to enable mirror projects to be more easily converted to
# push projects, we go ahead and do it for all projects.
{ find objects/$octet -type f -name "$octet19" -print0 | xargs -0 chmod ug+w || :; } 2>/dev/null

if has_forks "$proj"; then
	progress "~ [$proj] hard-linking loose objects into immediate child forks"
	# We have to update the lastparentgc time in the child forks even if they do not get any
	# new "loose objects" because they need to run gc just in case the parent now has some
	# objects that used to only be in the child so they can be removed from the child.
	# For example, a "patch" might be developed first in a fork and then later accepted into
	# the parent in which case the objects making up the patch in the child fork are now
	# redundant (since they're now in the parent as well) and need to be removed from the
	# child fork which can only happen if the child fork runs gc.
	shbin="${cfg_posix_sh_bin:-/bin/sh}"
	forkdir="$proj"
	# It is enough to copy objects just one level down and get_repo_list
	# takes a regular expression (which is automatically prefixed with '^')
	# so we can easily match forks exactly one level down from this project
	get_repo_list "$forkdir/[^/]*:" |
	while read fork; do
		# Ignore forks that do not exist or are symbolic links
		[ ! -L "$cfg_reporoot/$fork.git" -a -d "$cfg_reporoot/$fork.git" ] || \
			continue
		# Or do not have a non-zero length alternates file
		[ -s "$cfg_reporoot/$fork.git/objects/info/alternates" ] || \
			continue
		# Match objects in parent project
		for d in objects/$octet; do
			# (skip the unexpanded literal pattern when the glob matched nothing)
			[ "$d" != "objects/$octet" ] || continue
			mkdir -p "$cfg_reporoot/$fork.git/$d"
			find "$d" -maxdepth 1 -type f -name "$octet19" -print0 |
			xargs -0 "$shbin" -c 'ln -f "$@" '"'$cfg_reporoot/$fork.git/$d/'" sh || :
		done
		# Update the fork's lastparentgc date (must be current, not $gcstart)
		git --git-dir="$cfg_reporoot/$fork.git" config \
			gitweb.lastparentgc "$(date "$datefmt")"
	done
fi

# The git prune command does not take a -q or --quiet but started outputting
# 'Checking connectivity' progress messages in v1.7.9.  However, we can
# suppress those by piping through cat as it only activates the progress
# messages when stderr is a tty.  We only expire loose objects older than one
# day just in case there's some pending action (such as sending out a ref
# update) in progress that might want to examine them.  This may leave us with
# loose objects.  That's okay because at the next gc interval, we will always
# run gc if we see any loose objects regardless of whether or not we've seen
# any updates or we've received new linked objects from our parent.  Note that
# in order to keep loose objects that just recently became unreferenced but
# have a very old modification date around we rely on some help from both the
# update.sh and hooks/pre-receive scripts.  Furthermore, since Git v2.2.0
# (d3038d22 prune: keep objects reachable from recent objects) an unreachable
# object that would otherwise be pruned (because it's too old) will be kept
# alive by an unreachable object that refers to it that's not old enough to
# be pruned yet.
prunecmd='git prune --expire 1_day_ago'
[ -n "$show_progress" ] || \
	prunecmd="{ $prunecmd 2>&1 || touch .gc_failed; } | cat"
progress "~ [$proj] pruning expired unreachable loose objects"
eval "$prunecmd"
[ ! -e .gc_failed ] || exit 1

# darcs:// mirrors have an xxx.log file that will grow endlessly
# if this is a mirror and the file exists, shorten it to 10000 lines
# also take this opportunity to optimize the darcs repo
if [ ! -e .nofetch ] && [ -n "$cfg_mirror" ]; then
	url="$(config_get baseurl || :)"
	case "$url" in darcs://*)
		if [ -n "$cfg_mirror_darcs" ]; then
			url="${url%/}"
			basedarcs="$(basename "${url#darcs:/}")"
			if [ -f "$basedarcs.log" ]; then
				tail -n 10000 "$basedarcs.log" > "$basedarcs.log.$$"
				mv -f "$basedarcs.log.$$" "$basedarcs.log"
			fi
			if [ -d "$basedarcs.darcs" ]; then
				(
					cd "$basedarcs.darcs"
					# without show_progress suppress non-error output
					[ -n "$show_progress" ] || exec >/dev/null
					# Note that this does not optimize _darcs/inventories/ :(
					darcs optimize || :
				)
			fi
		fi
	esac
fi

# Create a matching .bndl header file for the all-in-one pack we just created
# but only if we're not a fork (otherwise the bundle would not be complete)
# and we are running at least Git version 1.7.2 (pack_is_complete always fails otherwise)
if [ ! -s objects/info/alternates ] && [ -n "$var_have_git_172" ]; then
	# There should only be one pack in $allpacks but if there was a
	# simultaneous push...
	# The one we just created will have a .idx and will NOT have a .keep
	progress "~ [$proj] creating downloadable bundle header"
	pkfound=
	pkhead=
	for pk in $allpacks; do
		[ -s "$pk" ] || continue
		pkbase="${pk%.pack}"
		[ -s "$pkbase.idx" ] || continue
		[ ! -e "$pkbase.keep" ] || continue
		if pkhead="$(pack_is_complete "$PWD/$pk" "$PWD/packed-refs" "$curhead")"; then
			pkfound="$pkbase"
			break
		fi
	done
	if [ -n "$pkfound" -a -n "$pkhead" ]; then
		{
			echo "# v2 git bundle"
			LC_ALL=C sed -ne "/^$octet20 refs\/[^ $tab]*\$/ p" < packed-refs
			echo "$pkhead HEAD"
			echo ""
		} > "$pkbase.bndl"
		bndletag="$("$cfg_basedir/bin/rangecgi" --etag -m 1 "$pkbase.bndl" "$pkbase.pack" || :)"
		bndlsha="$(printf '%s' "$bndletag" | git hash-object --stdin || :)"
		if [ -n "$bndletag" ]; then
			case "$bndlsha" in $octet20)
				bndlshatrailer="${bndlsha#????????}"
				bndlshaprefix="${bndlsha%$bndlshatrailer}"
				bndlname="$(TZ=UTC date +%Y%m%d_%H%M%S)-${bndlshaprefix:-0}"
				[ -d bundles ] || mkdir bundles
				echo "${pkbase#objects/pack/}.bndl" > "bundles/$bndlname"
				echo "${pkbase#objects/pack/}.pack" >> "bundles/$bndlname"
				ln -s -f -n "$bndlname" bundles/latest
			esac
		fi
	fi
fi

# Record the size of this repo as the sum of its *.pack sizes as 1024-byte blocks
config_set_raw girocco.reposizek "${reposizek:-0}"

# We use $gcstart here to avoid a race where a push occurs during the gc itself
# and the next future gc could be incorrectly skipped if we used the current
# timestamp here instead
config_set lastgc "$gcstart"
rm -f "$lockf"

progress "- [$proj] garbage check (`date`)"