#include "cache.h"
#include "tempfile.h"
#include "lockfile.h"
#include "commit.h"
#include "tag.h"
#include "pkt-line.h"
#include "remote.h"
#include "refs.h"
#include "sha1-array.h"
#include "diff.h"
#include "revision.h"
#include "commit-slab.h"
#include "list-objects.h"

static int is_shallow = -1;
static struct stat_validity shallow_stat;
static char *alternate_shallow_file;

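/*
 * Point the shallow machinery at an alternate shallow file instead of
 * $GIT_DIR/shallow. Must be called before is_repository_shallow(),
 * which caches its result; "override" controls whether an earlier
 * setting may be replaced.
 */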
void set_alternate_shallow_file(const char *path, int override)
{
	if (is_shallow != -1)
		die("BUG: is_repository_shallow must not be called before set_alternate_shallow_file");
	if (alternate_shallow_file && !override)
		return;
	free(alternate_shallow_file);
	alternate_shallow_file = xstrdup_or_null(path);
}

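/*
 * Register a commit as a shallow boundary: add a graft with no
 * parents for it and, if the commit has already been parsed, drop
 * its parent list.
 */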
int register_shallow(const struct object_id *oid)
{
	struct commit_graft *graft =
		xmalloc(sizeof(struct commit_graft));
	struct commit *commit = lookup_commit(oid);

	oidcpy(&graft->oid, oid);
	graft->nr_parent = -1;
	if (commit && commit->object.parsed)
		commit->parents = NULL;
	return register_commit_graft(graft, 0);
}

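/*
 * Lazily determine whether the repository is shallow by reading the
 * shallow file (or the alternate set above) and registering every
 * commit listed in it. The answer is cached in is_shallow.
 */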
int is_repository_shallow(void)
{
	FILE *fp;
	char buf[1024];
	const char *path = alternate_shallow_file;

	if (is_shallow >= 0)
		return is_shallow;

	if (!path)
		path = git_path_shallow();
	/*
	 * fetch-pack sets '--shallow-file ""' as an indicator that no
	 * shallow file should be used. We could just open it and it
	 * will likely fail. But let's do an explicit check instead.
	 */
	if (!*path || (fp = fopen(path, "r")) == NULL) {
		stat_validity_clear(&shallow_stat);
		is_shallow = 0;
		return is_shallow;
	}
	stat_validity_update(&shallow_stat, fileno(fp));
	is_shallow = 1;

	while (fgets(buf, sizeof(buf), fp)) {
		struct object_id oid;
		if (get_oid_hex(buf, &oid))
			die("bad shallow line: %s", buf);
		register_shallow(&oid);
	}
	fclose(fp);
	return is_shallow;
}

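/*
 * Walk down from "heads" and collect the commits that become the
 * shallow boundary when history is cut at "depth". Boundary commits
 * are marked with shallow_flag and returned as a list; commits within
 * the depth limit are marked with not_shallow_flag. commit->util is
 * used to remember the smallest depth at which a commit has been seen
 * so far.
 */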
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
					int shallow_flag, int not_shallow_flag)
{
	int i = 0, cur_depth = 0;
	struct commit_list *result = NULL;
	struct object_array stack = OBJECT_ARRAY_INIT;
	struct commit *commit = NULL;
	struct commit_graft *graft;

	while (commit || i < heads->nr || stack.nr) {
		struct commit_list *p;
		if (!commit) {
			if (i < heads->nr) {
				commit = (struct commit *)
					deref_tag(heads->objects[i++].item, NULL, 0);
				if (!commit || commit->object.type != OBJ_COMMIT) {
					commit = NULL;
					continue;
				}
				if (!commit->util)
					commit->util = xmalloc(sizeof(int));
				*(int *)commit->util = 0;
				cur_depth = 0;
			} else {
				commit = (struct commit *)
					object_array_pop(&stack);
				cur_depth = *(int *)commit->util;
			}
		}
		parse_commit_or_die(commit);
		cur_depth++;
		if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
		    (is_repository_shallow() && !commit->parents &&
		     (graft = lookup_commit_graft(&commit->object.oid)) != NULL &&
		     graft->nr_parent < 0)) {
			commit_list_insert(commit, &result);
			commit->object.flags |= shallow_flag;
			commit = NULL;
			continue;
		}
		commit->object.flags |= not_shallow_flag;
		for (p = commit->parents, commit = NULL; p; p = p->next) {
			if (!p->item->util) {
				int *pointer = xmalloc(sizeof(int));
				p->item->util = pointer;
				*pointer = cur_depth;
			} else {
				int *pointer = p->item->util;
				if (cur_depth >= *pointer)
					continue;
				*pointer = cur_depth;
			}
			if (p->next)
				add_object_array(&p->item->object,
						 NULL, &stack);
			else {
				commit = p->item;
				cur_depth = *(int *)commit->util;
			}
		}
	}

	return result;
}

static void show_commit(struct commit *commit, void *data)
{
	commit_list_insert(commit, data);
}

/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * is also returned.
 */
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
						    int shallow_flag,
						    int not_shallow_flag)
{
	struct commit_list *result = NULL, *p;
	struct commit_list *not_shallow_list = NULL;
	struct rev_info revs;
	int both_flags = shallow_flag | not_shallow_flag;

	/*
	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
	 * set at this point. But better be safe than sorry.
	 */
	clear_object_flags(both_flags);

	is_repository_shallow(); /* make sure shallows are read */

	init_revisions(&revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

	if (!not_shallow_list)
		die("no commits selected for shallow requests");

	/* Mark all reachable commits as NOT_SHALLOW */
	for (p = not_shallow_list; p; p = p->next)
		p->item->object.flags |= not_shallow_flag;

	/*
	 * Mark border commits SHALLOW + NOT_SHALLOW.
	 * We cannot clear NOT_SHALLOW right away: imagine border
	 * commit A is processed first, and commit B, whose parent is
	 * A, comes later. If NOT_SHALLOW on A were cleared when A is
	 * processed, B itself would be considered a border commit
	 * when it is processed, which is incorrect.
	 */
	for (p = not_shallow_list; p; p = p->next) {
		struct commit *c = p->item;
		struct commit_list *parent;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (parent = c->parents; parent; parent = parent->next)
			if (!(parent->item->object.flags & not_shallow_flag)) {
				c->object.flags |= shallow_flag;
				commit_list_insert(c, &result);
				break;
			}
	}
	free_commit_list(not_shallow_list);

	/*
	 * Now we can clean up NOT_SHALLOW on border commits. Having
	 * both flags set can confuse the caller.
	 */
	for (p = result; p; p = p->next) {
		struct object *o = &p->item->object;
		if ((o->flags & both_flags) == both_flags)
			o->flags &= ~not_shallow_flag;
	}
	return result;
}

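/*
 * Die if $GIT_DIR/shallow has changed since we last read it, using
 * the stat-validity data recorded at read time.
 */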
static void check_shallow_file_for_update(void)
{
	if (is_shallow == -1)
		die("BUG: shallow must be initialized by now");

	if (!stat_validity_check(&shallow_stat, git_path_shallow()))
		die("shallow file has changed since we read it");
}

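/* Flags for write_shallow_commits_1() and its write_one_shallow() callback */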
#define SEEN_ONLY 1
#define VERBOSE 2

struct write_shallow_data {
	struct strbuf *out;
	int use_pack_protocol;
	int count;
	unsigned flags;
};

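/*
 * for_each_commit_graft() callback: emit one "shallow" entry for a
 * parentless graft, either as a pkt-line or as plain text. With
 * SEEN_ONLY, grafts whose commits are not marked SEEN are skipped
 * (and reported when VERBOSE is also set).
 */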
static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
{
	struct write_shallow_data *data = cb_data;
	const char *hex = oid_to_hex(&graft->oid);
	if (graft->nr_parent != -1)
		return 0;
	if (data->flags & SEEN_ONLY) {
		struct commit *c = lookup_commit(&graft->oid);
		if (!c || !(c->object.flags & SEEN)) {
			if (data->flags & VERBOSE)
				printf("Removing %s from .git/shallow\n", hex);
			return 0;
		}
	}
	data->count++;
	if (data->use_pack_protocol)
		packet_buf_write(data->out, "shallow %s", hex);
	else {
		strbuf_addstr(data->out, hex);
		strbuf_addch(data->out, '\n');
	}
	return 0;
}

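/*
 * Write all shallow grafts (optionally filtered by "flags"), plus any
 * "extra" object IDs, to "out". Returns the number of entries written.
 */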
static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
				   const struct oid_array *extra,
				   unsigned flags)
{
	struct write_shallow_data data;
	int i;
	data.out = out;
	data.use_pack_protocol = use_pack_protocol;
	data.count = 0;
	data.flags = flags;
	for_each_commit_graft(write_one_shallow, &data);
	if (!extra)
		return data.count;
	for (i = 0; i < extra->nr; i++) {
		strbuf_addstr(out, oid_to_hex(extra->oid + i));
		strbuf_addch(out, '\n');
		data.count++;
	}
	return data.count;
}

int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct oid_array *extra)
{
	return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}

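/*
 * Write the current shallow list plus "extra" to a temporary file and
 * return its path, or "" when there is nothing shallow to write.
 */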
const char *setup_temporary_shallow(const struct oid_array *extra)
{
	struct tempfile *temp;
	struct strbuf sb = STRBUF_INIT;

	if (write_shallow_commits(&sb, 0, extra)) {
		temp = xmks_tempfile(git_path("shallow_XXXXXX"));

		if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
		    close_tempfile_gently(temp) < 0)
			die_errno("failed to write to %s",
				  get_tempfile_path(temp));
		strbuf_release(&sb);
		return get_tempfile_path(temp);
	}
	/*
	 * is_repository_shallow() sees empty string as "no shallow
	 * file".
	 */
	return "";
}

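/*
 * Take the lock on $GIT_DIR/shallow and write the (possibly extended)
 * shallow list into the lockfile; *alternate_shallow_file is pointed
 * at the lockfile path, or at "" when the list is empty.
 */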
void setup_alternate_shallow(struct lock_file *shallow_lock,
			     const char **alternate_shallow_file,
			     const struct oid_array *extra)
{
	struct strbuf sb = STRBUF_INIT;
	int fd;

	fd = hold_lock_file_for_update(shallow_lock, git_path_shallow(),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update();
	if (write_shallow_commits(&sb, 0, extra)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(shallow_lock));
		*alternate_shallow_file = get_lock_file_path(shallow_lock);
	} else
		/*
		 * is_repository_shallow() sees empty string as "no
		 * shallow file".
		 */
		*alternate_shallow_file = "";
	strbuf_release(&sb);
}

static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
{
	int fd = *(int *)cb;
	if (graft->nr_parent == -1)
		packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
	return 0;
}

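/* Advertise all shallow grafts as "shallow" pkt-lines on fd */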
void advertise_shallow_grafts(int fd)
{
	if (!is_repository_shallow())
		return;
	for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
}

/*
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN".
 */
void prune_shallow(int show_only)
{
	static struct lock_file shallow_lock;
	struct strbuf sb = STRBUF_INIT;
	int fd;

	if (show_only) {
		write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY | VERBOSE);
		strbuf_release(&sb);
		return;
	}
	fd = hold_lock_file_for_update(&shallow_lock, git_path_shallow(),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update();
	if (write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock));
		commit_lock_file(&shallow_lock);
	} else {
		unlink(git_path_shallow());
		rollback_lock_file(&shallow_lock);
	}
	strbuf_release(&sb);
}

struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);

/*
 * Step 1, split sender shallow commits into "ours" and "theirs"
 * Step 2, clean "ours" based on .git/shallow
 */
void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
{
	int i;
	trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
	memset(info, 0, sizeof(*info));
	info->shallow = sa;
	if (!sa)
		return;
	ALLOC_ARRAY(info->ours, sa->nr);
	ALLOC_ARRAY(info->theirs, sa->nr);
	for (i = 0; i < sa->nr; i++) {
		if (has_object_file(sa->oid + i)) {
			struct commit_graft *graft;
			graft = lookup_commit_graft(&sa->oid[i]);
			if (graft && graft->nr_parent < 0)
				continue;
			info->ours[info->nr_ours++] = i;
		} else
			info->theirs[info->nr_theirs++] = i;
	}
}

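/* Release the index arrays allocated by prepare_shallow_info() */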
void clear_shallow_info(struct shallow_info *info)
{
	free(info->ours);
	free(info->theirs);
}

/* Step 4, remove non-existent ones in "theirs" after getting the pack */

void remove_nonexistent_theirs_shallow(struct shallow_info *info)
{
	struct object_id *oid = info->shallow->oid;
	int i, dst;
	trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		if (has_object_file(oid + info->theirs[i]))
			dst++;
	}
	info->nr_theirs = dst;
}

define_commit_slab(ref_bitmap, uint32_t *);

#define POOL_SIZE (512 * 1024)

struct paint_info {
	struct ref_bitmap ref_bitmap;
	unsigned nr_bits;
	char **pools;
	char *free, *end;
	unsigned pool_count;
};

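/*
 * Hand out one ref bitmap's worth of memory from the current pool,
 * starting a new pool when the current one cannot fit it. The
 * returned memory is not zeroed.
 */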
static uint32_t *paint_alloc(struct paint_info *info)
{
	unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
	unsigned size = nr * sizeof(uint32_t);
	void *p;
	if (!info->pool_count || size > info->end - info->free) {
		if (size > POOL_SIZE)
			die("BUG: pool size too small for %d in paint_alloc()",
			    size);
		info->pool_count++;
		REALLOC_ARRAY(info->pools, info->pool_count);
		info->free = xmalloc(POOL_SIZE);
		info->pools[info->pool_count - 1] = info->free;
		info->end = info->free + POOL_SIZE;
	}
	p = info->free;
	info->free += size;
	return p;
}

/*
 * Given a commit SHA-1, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
		       unsigned int id)
{
	unsigned int i, nr;
	struct commit_list *head = NULL;
	int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
	size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
	struct commit *c = lookup_commit_reference_gently(oid, 1);
	uint32_t *tmp; /* to be freed before return */
	uint32_t *bitmap;

	if (!c)
		return;

	tmp = xmalloc(bitmap_size);
	bitmap = paint_alloc(info);
	memset(bitmap, 0, bitmap_size);
	bitmap[id / 32] |= (1U << (id % 32));
	commit_list_insert(c, &head);
	while (head) {
		struct commit_list *p;
		struct commit *c = pop_commit(&head);
		uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

		/* XXX check "UNINTERESTING" from pack bitmaps if available */
		if (c->object.flags & (SEEN | UNINTERESTING))
			continue;
		else
			c->object.flags |= SEEN;

		if (*refs == NULL)
			*refs = bitmap;
		else {
			memcpy(tmp, *refs, bitmap_size);
			for (i = 0; i < bitmap_nr; i++)
				tmp[i] |= bitmap[i];
			if (memcmp(tmp, *refs, bitmap_size)) {
				*refs = paint_alloc(info);
				memcpy(*refs, tmp, bitmap_size);
			}
		}

		if (c->object.flags & BOTTOM)
			continue;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (p = c->parents; p; p = p->next) {
			if (p->item->object.flags & SEEN)
				continue;
			commit_list_insert(p->item, &head);
		}
	}

	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~SEEN;
	}

	free(tmp);
}

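/*
 * for_each_ref() callback: mark the commit a ref points at (and its
 * ancestors) UNINTERESTING so paint_down() stops there.
 */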
static int mark_uninteresting(const char *refname, const struct object_id *oid,
			      int flags, void *cb_data)
{
	struct commit *commit = lookup_commit_reference_gently(oid, 1);
	if (!commit)
		return 0;
	commit->object.flags |= UNINTERESTING;
	mark_parents_uninteresting(commit);
	return 0;
}

static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status);
/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit is set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated. If ref_status is
 * not NULL, it's an array of ref->nr ints; ref_status[i] is true if
 * the ref needs some shallow commits from either info->ours or
 * info->theirs.
 */
void assign_shallow_commits_to_refs(struct shallow_info *info,
				    uint32_t **used, int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct oid_array *ref = info->ref;
	unsigned int i, nr;
	int *shallow, nr_shallow = 0;
	struct paint_info pi;

	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
	ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
	for (i = 0; i < info->nr_ours; i++)
		shallow[nr_shallow++] = info->ours[i];
	for (i = 0; i < info->nr_theirs; i++)
		shallow[nr_shallow++] = info->theirs[i];

	/*
	 * Prepare the commit graph to track what refs can reach what
	 * (new) shallow commits.
	 */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (!o || o->type != OBJ_COMMIT)
			continue;

		o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
	}

	memset(&pi, 0, sizeof(pi));
	init_ref_bitmap(&pi.ref_bitmap);
	pi.nr_bits = ref->nr;

	/*
	 * "--not --all" to cut short the traversal if new refs
	 * connect to old refs. If not (e.g. forced ref updates), the
	 * traversal will have to go down to the current shallow
	 * commits.
	 */
	head_ref(mark_uninteresting, NULL);
	for_each_ref(mark_uninteresting, NULL);

	/* Mark potential bottoms so we won't go out of bounds */
	for (i = 0; i < nr_shallow; i++) {
		struct commit *c = lookup_commit(&oid[shallow[i]]);
		c->object.flags |= BOTTOM;
	}

	for (i = 0; i < ref->nr; i++)
		paint_down(&pi, ref->oid + i, i);

	if (used) {
		int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
		memset(used, 0, sizeof(*used) * info->shallow->nr);
		for (i = 0; i < nr_shallow; i++) {
			const struct commit *c = lookup_commit(&oid[shallow[i]]);
			uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
			if (*map)
				used[shallow[i]] = xmemdupz(*map, bitmap_size);
		}
		/*
		 * unreachable shallow commits are not removed from
		 * "ours" and "theirs". The user is supposed to run
		 * step 7 on every ref separately and not trust "ours"
		 * and "theirs" any more.
		 */
	} else
		post_assign_shallow(info, &pi.ref_bitmap, ref_status);

	clear_ref_bitmap(&pi.ref_bitmap);
	for (i = 0; i < pi.pool_count; i++)
		free(pi.pools[i]);
	free(pi.pools);
	free(shallow);
}

struct commit_array {
	struct commit **commits;
	int nr, alloc;
};

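/* for_each_ref() callback: collect ref tips for reachability tests */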
static int add_ref(const char *refname, const struct object_id *oid,
		   int flags, void *cb_data)
{
	struct commit_array *ca = cb_data;
	ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
	ca->commits[ca->nr] = lookup_commit_reference_gently(oid, 1);
	if (ca->commits[ca->nr])
		ca->nr++;
	return 0;
}

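/* Bump ref_status[i] for every ref whose bit is set in "bitmap" */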
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
	unsigned int i;
	if (!ref_status)
		return;
	for (i = 0; i < nr; i++)
		if (bitmap[i / 32] & (1U << (i % 32)))
			ref_status[i]++;
}

/*
 * Step 7, reachability test on "ours" at commit level
 */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct commit *c;
	uint32_t **bitmap;
	int dst, i, j;
	int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
	struct commit_array ca;

	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
	if (ref_status)
		memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

	/* Remove unreachable shallow commits from "theirs" */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		c = lookup_commit(&oid[info->theirs[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_theirs = dst;

	memset(&ca, 0, sizeof(ca));
	head_ref(add_ref, &ca);
	for_each_ref(add_ref, &ca);

	/* Remove unreachable shallow commits from "ours" */
	for (i = dst = 0; i < info->nr_ours; i++) {
		if (i != dst)
			info->ours[dst] = info->ours[i];
		c = lookup_commit(&oid[info->ours[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j] &&
			    /* Step 7, reachability test at commit level */
			    !in_merge_bases_many(c, ca.nr, ca.commits)) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_ours = dst;

	free(ca.commits);
}

/* (Delayed) step 7, reachability test at commit level */
int delayed_reachability_test(struct shallow_info *si, int c)
{
	if (si->need_reachability_test[c]) {
		struct commit *commit = lookup_commit(&si->shallow->oid[c]);

		if (!si->commits) {
			struct commit_array ca;

			memset(&ca, 0, sizeof(ca));
			head_ref(add_ref, &ca);
			for_each_ref(add_ref, &ca);
			si->commits = ca.commits;
			si->nr_commits = ca.nr;
		}

		si->reachable[c] = in_merge_bases_many(commit,
						       si->nr_commits,
						       si->commits);
		si->need_reachability_test[c] = 0;
	}
	return si->reachable[c];
}