1#include"builtin.h" 2#include"cache.h" 3#include"attr.h" 4#include"object.h" 5#include"blob.h" 6#include"commit.h" 7#include"tag.h" 8#include"tree.h" 9#include"delta.h" 10#include"pack.h" 11#include"pack-revindex.h" 12#include"csum-file.h" 13#include"tree-walk.h" 14#include"diff.h" 15#include"revision.h" 16#include"list-objects.h" 17#include"pack-objects.h" 18#include"progress.h" 19#include"refs.h" 20#include"streaming.h" 21#include"thread-utils.h" 22#include"pack-bitmap.h" 23#include"reachable.h" 24#include"sha1-array.h" 25#include"argv-array.h" 26#include"mru.h" 27 28static const char*pack_usage[] = { 29N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 30N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 31 NULL 32}; 33 34/* 35 * Objects we are going to pack are collected in the `to_pack` structure. 36 * It contains an array (dynamically expanded) of the object data, and a map 37 * that can resolve SHA1s to their position in the array. 38 */ 39static struct packing_data to_pack; 40 41static struct pack_idx_entry **written_list; 42static uint32_t nr_result, nr_written; 43 44static int non_empty; 45static int reuse_delta =1, reuse_object =1; 46static int keep_unreachable, unpack_unreachable, include_tag; 47static unsigned long unpack_unreachable_expiration; 48static int pack_loose_unreachable; 49static int local; 50static int have_non_local_packs; 51static int incremental; 52static int ignore_packed_keep; 53static int allow_ofs_delta; 54static struct pack_idx_option pack_idx_opts; 55static const char*base_name; 56static int progress =1; 57static int window =10; 58static unsigned long pack_size_limit; 59static int depth =50; 60static int delta_search_threads; 61static int pack_to_stdout; 62static int num_preferred_base; 63static struct progress *progress_state; 64 65static struct packed_git *reuse_packfile; 66static uint32_t reuse_packfile_objects; 67static off_t reuse_packfile_offset; 68 69static int use_bitmap_index_default =1; 70static int use_bitmap_index = -1; 71static int write_bitmap_index; 72static uint16_t write_bitmap_options; 73 74static unsigned long delta_cache_size =0; 75static unsigned long max_delta_cache_size =256*1024*1024; 76static unsigned long cache_max_small_delta_size =1000; 77 78static unsigned long window_memory_limit =0; 79 80/* 81 * stats 82 */ 83static uint32_t written, written_delta; 84static uint32_t reused, reused_delta; 85 86/* 87 * Indexed commits 88 */ 89static struct commit **indexed_commits; 90static unsigned int indexed_commits_nr; 91static unsigned int indexed_commits_alloc; 92 93static voidindex_commit_for_bitmap(struct commit *commit) 94{ 95if(indexed_commits_nr >= indexed_commits_alloc) { 96 indexed_commits_alloc = (indexed_commits_alloc +32) *2; 97REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 98} 99 100 indexed_commits[indexed_commits_nr++] = commit; 101} 102 103static void*get_delta(struct object_entry *entry) 104{ 105unsigned long size, base_size, delta_size; 106void*buf, *base_buf, *delta_buf; 107enum object_type type; 108 109 buf =read_sha1_file(entry->idx.sha1, &type, &size); 110if(!buf) 111die("unable to read%s",sha1_to_hex(entry->idx.sha1)); 112 base_buf =read_sha1_file(entry->delta->idx.sha1, &type, &base_size); 113if(!base_buf) 114die("unable to read%s",sha1_to_hex(entry->delta->idx.sha1)); 115 delta_buf =diff_delta(base_buf, base_size, 116 buf, size, &delta_size,0); 117if(!delta_buf || delta_size != entry->delta_size) 118die("delta size changed"); 119free(buf); 
120free(base_buf); 121return delta_buf; 122} 123 124static unsigned longdo_compress(void**pptr,unsigned long size) 125{ 126 git_zstream stream; 127void*in, *out; 128unsigned long maxsize; 129 130git_deflate_init(&stream, pack_compression_level); 131 maxsize =git_deflate_bound(&stream, size); 132 133 in = *pptr; 134 out =xmalloc(maxsize); 135*pptr = out; 136 137 stream.next_in = in; 138 stream.avail_in = size; 139 stream.next_out = out; 140 stream.avail_out = maxsize; 141while(git_deflate(&stream, Z_FINISH) == Z_OK) 142;/* nothing */ 143git_deflate_end(&stream); 144 145free(in); 146return stream.total_out; 147} 148 149static unsigned longwrite_large_blob_data(struct git_istream *st,struct sha1file *f, 150const unsigned char*sha1) 151{ 152 git_zstream stream; 153unsigned char ibuf[1024*16]; 154unsigned char obuf[1024*16]; 155unsigned long olen =0; 156 157git_deflate_init(&stream, pack_compression_level); 158 159for(;;) { 160 ssize_t readlen; 161int zret = Z_OK; 162 readlen =read_istream(st, ibuf,sizeof(ibuf)); 163if(readlen == -1) 164die(_("unable to read%s"),sha1_to_hex(sha1)); 165 166 stream.next_in = ibuf; 167 stream.avail_in = readlen; 168while((stream.avail_in || readlen ==0) && 169(zret == Z_OK || zret == Z_BUF_ERROR)) { 170 stream.next_out = obuf; 171 stream.avail_out =sizeof(obuf); 172 zret =git_deflate(&stream, readlen ?0: Z_FINISH); 173sha1write(f, obuf, stream.next_out - obuf); 174 olen += stream.next_out - obuf; 175} 176if(stream.avail_in) 177die(_("deflate error (%d)"), zret); 178if(readlen ==0) { 179if(zret != Z_STREAM_END) 180die(_("deflate error (%d)"), zret); 181break; 182} 183} 184git_deflate_end(&stream); 185return olen; 186} 187 188/* 189 * we are going to reuse the existing object data as is. make 190 * sure it is not corrupt. 191 */ 192static intcheck_pack_inflate(struct packed_git *p, 193struct pack_window **w_curs, 194 off_t offset, 195 off_t len, 196unsigned long expect) 197{ 198 git_zstream stream; 199unsigned char fakebuf[4096], *in; 200int st; 201 202memset(&stream,0,sizeof(stream)); 203git_inflate_init(&stream); 204do{ 205 in =use_pack(p, w_curs, offset, &stream.avail_in); 206 stream.next_in = in; 207 stream.next_out = fakebuf; 208 stream.avail_out =sizeof(fakebuf); 209 st =git_inflate(&stream, Z_FINISH); 210 offset += stream.next_in - in; 211}while(st == Z_OK || st == Z_BUF_ERROR); 212git_inflate_end(&stream); 213return(st == Z_STREAM_END && 214 stream.total_out == expect && 215 stream.total_in == len) ?0: -1; 216} 217 218static voidcopy_pack_data(struct sha1file *f, 219struct packed_git *p, 220struct pack_window **w_curs, 221 off_t offset, 222 off_t len) 223{ 224unsigned char*in; 225unsigned long avail; 226 227while(len) { 228 in =use_pack(p, w_curs, offset, &avail); 229if(avail > len) 230 avail = (unsigned long)len; 231sha1write(f, in, avail); 232 offset += avail; 233 len -= avail; 234} 235} 236 237/* Return 0 if we will bust the pack-size limit */ 238static unsigned longwrite_no_reuse_object(struct sha1file *f,struct object_entry *entry, 239unsigned long limit,int usable_delta) 240{ 241unsigned long size, datalen; 242unsigned char header[MAX_PACK_OBJECT_HEADER], 243 dheader[MAX_PACK_OBJECT_HEADER]; 244unsigned hdrlen; 245enum object_type type; 246void*buf; 247struct git_istream *st = NULL; 248 249if(!usable_delta) { 250if(entry->type == OBJ_BLOB && 251 entry->size > big_file_threshold && 252(st =open_istream(entry->idx.sha1, &type, &size, NULL)) != NULL) 253 buf = NULL; 254else{ 255 buf =read_sha1_file(entry->idx.sha1, &type, &size); 256if(!buf) 
257die(_("unable to read%s"),sha1_to_hex(entry->idx.sha1)); 258} 259/* 260 * make sure no cached delta data remains from a 261 * previous attempt before a pack split occurred. 262 */ 263free(entry->delta_data); 264 entry->delta_data = NULL; 265 entry->z_delta_size =0; 266}else if(entry->delta_data) { 267 size = entry->delta_size; 268 buf = entry->delta_data; 269 entry->delta_data = NULL; 270 type = (allow_ofs_delta && entry->delta->idx.offset) ? 271 OBJ_OFS_DELTA : OBJ_REF_DELTA; 272}else{ 273 buf =get_delta(entry); 274 size = entry->delta_size; 275 type = (allow_ofs_delta && entry->delta->idx.offset) ? 276 OBJ_OFS_DELTA : OBJ_REF_DELTA; 277} 278 279if(st)/* large blob case, just assume we don't compress well */ 280 datalen = size; 281else if(entry->z_delta_size) 282 datalen = entry->z_delta_size; 283else 284 datalen =do_compress(&buf, size); 285 286/* 287 * The object header is a byte of 'type' followed by zero or 288 * more bytes of length. 289 */ 290 hdrlen =encode_in_pack_object_header(header,sizeof(header), 291 type, size); 292 293if(type == OBJ_OFS_DELTA) { 294/* 295 * Deltas with relative base contain an additional 296 * encoding of the relative offset for the delta 297 * base from this object's position in the pack. 298 */ 299 off_t ofs = entry->idx.offset - entry->delta->idx.offset; 300unsigned pos =sizeof(dheader) -1; 301 dheader[pos] = ofs &127; 302while(ofs >>=7) 303 dheader[--pos] =128| (--ofs &127); 304if(limit && hdrlen +sizeof(dheader) - pos + datalen +20>= limit) { 305if(st) 306close_istream(st); 307free(buf); 308return0; 309} 310sha1write(f, header, hdrlen); 311sha1write(f, dheader + pos,sizeof(dheader) - pos); 312 hdrlen +=sizeof(dheader) - pos; 313}else if(type == OBJ_REF_DELTA) { 314/* 315 * Deltas with a base reference contain 316 * an additional 20 bytes for the base sha1. 317 */ 318if(limit && hdrlen +20+ datalen +20>= limit) { 319if(st) 320close_istream(st); 321free(buf); 322return0; 323} 324sha1write(f, header, hdrlen); 325sha1write(f, entry->delta->idx.sha1,20); 326 hdrlen +=20; 327}else{ 328if(limit && hdrlen + datalen +20>= limit) { 329if(st) 330close_istream(st); 331free(buf); 332return0; 333} 334sha1write(f, header, hdrlen); 335} 336if(st) { 337 datalen =write_large_blob_data(st, f, entry->idx.sha1); 338close_istream(st); 339}else{ 340sha1write(f, buf, datalen); 341free(buf); 342} 343 344return hdrlen + datalen; 345} 346 347/* Return 0 if we will bust the pack-size limit */ 348static off_t write_reuse_object(struct sha1file *f,struct object_entry *entry, 349unsigned long limit,int usable_delta) 350{ 351struct packed_git *p = entry->in_pack; 352struct pack_window *w_curs = NULL; 353struct revindex_entry *revidx; 354 off_t offset; 355enum object_type type = entry->type; 356 off_t datalen; 357unsigned char header[MAX_PACK_OBJECT_HEADER], 358 dheader[MAX_PACK_OBJECT_HEADER]; 359unsigned hdrlen; 360 361if(entry->delta) 362 type = (allow_ofs_delta && entry->delta->idx.offset) ? 
363 OBJ_OFS_DELTA : OBJ_REF_DELTA; 364 hdrlen =encode_in_pack_object_header(header,sizeof(header), 365 type, entry->size); 366 367 offset = entry->in_pack_offset; 368 revidx =find_pack_revindex(p, offset); 369 datalen = revidx[1].offset - offset; 370if(!pack_to_stdout && p->index_version >1&& 371check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) { 372error("bad packed object CRC for%s",sha1_to_hex(entry->idx.sha1)); 373unuse_pack(&w_curs); 374returnwrite_no_reuse_object(f, entry, limit, usable_delta); 375} 376 377 offset += entry->in_pack_header_size; 378 datalen -= entry->in_pack_header_size; 379 380if(!pack_to_stdout && p->index_version ==1&& 381check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) { 382error("corrupt packed object for%s",sha1_to_hex(entry->idx.sha1)); 383unuse_pack(&w_curs); 384returnwrite_no_reuse_object(f, entry, limit, usable_delta); 385} 386 387if(type == OBJ_OFS_DELTA) { 388 off_t ofs = entry->idx.offset - entry->delta->idx.offset; 389unsigned pos =sizeof(dheader) -1; 390 dheader[pos] = ofs &127; 391while(ofs >>=7) 392 dheader[--pos] =128| (--ofs &127); 393if(limit && hdrlen +sizeof(dheader) - pos + datalen +20>= limit) { 394unuse_pack(&w_curs); 395return0; 396} 397sha1write(f, header, hdrlen); 398sha1write(f, dheader + pos,sizeof(dheader) - pos); 399 hdrlen +=sizeof(dheader) - pos; 400 reused_delta++; 401}else if(type == OBJ_REF_DELTA) { 402if(limit && hdrlen +20+ datalen +20>= limit) { 403unuse_pack(&w_curs); 404return0; 405} 406sha1write(f, header, hdrlen); 407sha1write(f, entry->delta->idx.sha1,20); 408 hdrlen +=20; 409 reused_delta++; 410}else{ 411if(limit && hdrlen + datalen +20>= limit) { 412unuse_pack(&w_curs); 413return0; 414} 415sha1write(f, header, hdrlen); 416} 417copy_pack_data(f, p, &w_curs, offset, datalen); 418unuse_pack(&w_curs); 419 reused++; 420return hdrlen + datalen; 421} 422 423/* Return 0 if we will bust the pack-size limit */ 424static off_t write_object(struct sha1file *f, 425struct object_entry *entry, 426 off_t write_offset) 427{ 428unsigned long limit; 429 off_t len; 430int usable_delta, to_reuse; 431 432if(!pack_to_stdout) 433crc32_begin(f); 434 435/* apply size limit if limited packsize and not first object */ 436if(!pack_size_limit || !nr_written) 437 limit =0; 438else if(pack_size_limit <= write_offset) 439/* 440 * the earlier object did not fit the limit; avoid 441 * mistaking this with unlimited (i.e. limit = 0). 442 */ 443 limit =1; 444else 445 limit = pack_size_limit - write_offset; 446 447if(!entry->delta) 448 usable_delta =0;/* no delta */ 449else if(!pack_size_limit) 450 usable_delta =1;/* unlimited packfile */ 451else if(entry->delta->idx.offset == (off_t)-1) 452 usable_delta =0;/* base was written to another pack */ 453else if(entry->delta->idx.offset) 454 usable_delta =1;/* base already exists in this pack */ 455else 456 usable_delta =0;/* base could end up in another pack */ 457 458if(!reuse_object) 459 to_reuse =0;/* explicit */ 460else if(!entry->in_pack) 461 to_reuse =0;/* can't reuse what we don't have */ 462else if(entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA) 463/* check_object() decided it for us ... */ 464 to_reuse = usable_delta; 465/* ... but pack split may override that */ 466else if(entry->type != entry->in_pack_type) 467 to_reuse =0;/* pack has delta which is unusable */ 468else if(entry->delta) 469 to_reuse =0;/* we want to pack afresh */ 470else 471 to_reuse =1;/* we have it in-pack undeltified, 472 * and we do not need to deltify it. 
473 */ 474 475if(!to_reuse) 476 len =write_no_reuse_object(f, entry, limit, usable_delta); 477else 478 len =write_reuse_object(f, entry, limit, usable_delta); 479if(!len) 480return0; 481 482if(usable_delta) 483 written_delta++; 484 written++; 485if(!pack_to_stdout) 486 entry->idx.crc32 =crc32_end(f); 487return len; 488} 489 490enum write_one_status { 491 WRITE_ONE_SKIP = -1,/* already written */ 492 WRITE_ONE_BREAK =0,/* writing this will bust the limit; not written */ 493 WRITE_ONE_WRITTEN =1,/* normal */ 494 WRITE_ONE_RECURSIVE =2/* already scheduled to be written */ 495}; 496 497static enum write_one_status write_one(struct sha1file *f, 498struct object_entry *e, 499 off_t *offset) 500{ 501 off_t size; 502int recursing; 503 504/* 505 * we set offset to 1 (which is an impossible value) to mark 506 * the fact that this object is involved in "write its base 507 * first before writing a deltified object" recursion. 508 */ 509 recursing = (e->idx.offset ==1); 510if(recursing) { 511warning("recursive delta detected for object%s", 512sha1_to_hex(e->idx.sha1)); 513return WRITE_ONE_RECURSIVE; 514}else if(e->idx.offset || e->preferred_base) { 515/* offset is non zero if object is written already. */ 516return WRITE_ONE_SKIP; 517} 518 519/* if we are deltified, write out base object first. */ 520if(e->delta) { 521 e->idx.offset =1;/* now recurse */ 522switch(write_one(f, e->delta, offset)) { 523case WRITE_ONE_RECURSIVE: 524/* we cannot depend on this one */ 525 e->delta = NULL; 526break; 527default: 528break; 529case WRITE_ONE_BREAK: 530 e->idx.offset = recursing; 531return WRITE_ONE_BREAK; 532} 533} 534 535 e->idx.offset = *offset; 536 size =write_object(f, e, *offset); 537if(!size) { 538 e->idx.offset = recursing; 539return WRITE_ONE_BREAK; 540} 541 written_list[nr_written++] = &e->idx; 542 543/* make sure off_t is sufficiently large not to wrap */ 544if(signed_add_overflows(*offset, size)) 545die("pack too large for current definition of off_t"); 546*offset += size; 547return WRITE_ONE_WRITTEN; 548} 549 550static intmark_tagged(const char*path,const struct object_id *oid,int flag, 551void*cb_data) 552{ 553unsigned char peeled[20]; 554struct object_entry *entry =packlist_find(&to_pack, oid->hash, NULL); 555 556if(entry) 557 entry->tagged =1; 558if(!peel_ref(path, peeled)) { 559 entry =packlist_find(&to_pack, peeled, NULL); 560if(entry) 561 entry->tagged =1; 562} 563return0; 564} 565 566staticinlinevoidadd_to_write_order(struct object_entry **wo, 567unsigned int*endp, 568struct object_entry *e) 569{ 570if(e->filled) 571return; 572 wo[(*endp)++] = e; 573 e->filled =1; 574} 575 576static voidadd_descendants_to_write_order(struct object_entry **wo, 577unsigned int*endp, 578struct object_entry *e) 579{ 580int add_to_order =1; 581while(e) { 582if(add_to_order) { 583struct object_entry *s; 584/* add this node... */ 585add_to_write_order(wo, endp, e); 586/* all its siblings... 
*/ 587for(s = e->delta_sibling; s; s = s->delta_sibling) { 588add_to_write_order(wo, endp, s); 589} 590} 591/* drop down a level to add left subtree nodes if possible */ 592if(e->delta_child) { 593 add_to_order =1; 594 e = e->delta_child; 595}else{ 596 add_to_order =0; 597/* our sibling might have some children, it is next */ 598if(e->delta_sibling) { 599 e = e->delta_sibling; 600continue; 601} 602/* go back to our parent node */ 603 e = e->delta; 604while(e && !e->delta_sibling) { 605/* we're on the right side of a subtree, keep 606 * going up until we can go right again */ 607 e = e->delta; 608} 609if(!e) { 610/* done- we hit our original root node */ 611return; 612} 613/* pass it off to sibling at this level */ 614 e = e->delta_sibling; 615} 616}; 617} 618 619static voidadd_family_to_write_order(struct object_entry **wo, 620unsigned int*endp, 621struct object_entry *e) 622{ 623struct object_entry *root; 624 625for(root = e; root->delta; root = root->delta) 626;/* nothing */ 627add_descendants_to_write_order(wo, endp, root); 628} 629 630static struct object_entry **compute_write_order(void) 631{ 632unsigned int i, wo_end, last_untagged; 633 634struct object_entry **wo; 635struct object_entry *objects = to_pack.objects; 636 637for(i =0; i < to_pack.nr_objects; i++) { 638 objects[i].tagged =0; 639 objects[i].filled =0; 640 objects[i].delta_child = NULL; 641 objects[i].delta_sibling = NULL; 642} 643 644/* 645 * Fully connect delta_child/delta_sibling network. 646 * Make sure delta_sibling is sorted in the original 647 * recency order. 648 */ 649for(i = to_pack.nr_objects; i >0;) { 650struct object_entry *e = &objects[--i]; 651if(!e->delta) 652continue; 653/* Mark me as the first child */ 654 e->delta_sibling = e->delta->delta_child; 655 e->delta->delta_child = e; 656} 657 658/* 659 * Mark objects that are at the tip of tags. 660 */ 661for_each_tag_ref(mark_tagged, NULL); 662 663/* 664 * Give the objects in the original recency order until 665 * we see a tagged tip. 666 */ 667ALLOC_ARRAY(wo, to_pack.nr_objects); 668for(i = wo_end =0; i < to_pack.nr_objects; i++) { 669if(objects[i].tagged) 670break; 671add_to_write_order(wo, &wo_end, &objects[i]); 672} 673 last_untagged = i; 674 675/* 676 * Then fill all the tagged tips. 677 */ 678for(; i < to_pack.nr_objects; i++) { 679if(objects[i].tagged) 680add_to_write_order(wo, &wo_end, &objects[i]); 681} 682 683/* 684 * And then all remaining commits and tags. 685 */ 686for(i = last_untagged; i < to_pack.nr_objects; i++) { 687if(objects[i].type != OBJ_COMMIT && 688 objects[i].type != OBJ_TAG) 689continue; 690add_to_write_order(wo, &wo_end, &objects[i]); 691} 692 693/* 694 * And then all the trees. 
695 */ 696for(i = last_untagged; i < to_pack.nr_objects; i++) { 697if(objects[i].type != OBJ_TREE) 698continue; 699add_to_write_order(wo, &wo_end, &objects[i]); 700} 701 702/* 703 * Finally all the rest in really tight order 704 */ 705for(i = last_untagged; i < to_pack.nr_objects; i++) { 706if(!objects[i].filled) 707add_family_to_write_order(wo, &wo_end, &objects[i]); 708} 709 710if(wo_end != to_pack.nr_objects) 711die("ordered%uobjects, expected %"PRIu32, wo_end, to_pack.nr_objects); 712 713return wo; 714} 715 716static off_t write_reused_pack(struct sha1file *f) 717{ 718unsigned char buffer[8192]; 719 off_t to_write, total; 720int fd; 721 722if(!is_pack_valid(reuse_packfile)) 723die("packfile is invalid:%s", reuse_packfile->pack_name); 724 725 fd =git_open(reuse_packfile->pack_name); 726if(fd <0) 727die_errno("unable to open packfile for reuse:%s", 728 reuse_packfile->pack_name); 729 730if(lseek(fd,sizeof(struct pack_header), SEEK_SET) == -1) 731die_errno("unable to seek in reused packfile"); 732 733if(reuse_packfile_offset <0) 734 reuse_packfile_offset = reuse_packfile->pack_size -20; 735 736 total = to_write = reuse_packfile_offset -sizeof(struct pack_header); 737 738while(to_write) { 739int read_pack =xread(fd, buffer,sizeof(buffer)); 740 741if(read_pack <=0) 742die_errno("unable to read from reused packfile"); 743 744if(read_pack > to_write) 745 read_pack = to_write; 746 747sha1write(f, buffer, read_pack); 748 to_write -= read_pack; 749 750/* 751 * We don't know the actual number of objects written, 752 * only how many bytes written, how many bytes total, and 753 * how many objects total. So we can fake it by pretending all 754 * objects we are writing are the same size. This gives us a 755 * smooth progress meter, and at the end it matches the true 756 * answer. 757 */ 758 written = reuse_packfile_objects * 759(((double)(total - to_write)) / total); 760display_progress(progress_state, written); 761} 762 763close(fd); 764 written = reuse_packfile_objects; 765display_progress(progress_state, written); 766return reuse_packfile_offset -sizeof(struct pack_header); 767} 768 769static const char no_split_warning[] =N_( 770"disabling bitmap writing, packs are split due to pack.packSizeLimit" 771); 772 773static voidwrite_pack_file(void) 774{ 775uint32_t i =0, j; 776struct sha1file *f; 777 off_t offset; 778uint32_t nr_remaining = nr_result; 779time_t last_mtime =0; 780struct object_entry **write_order; 781 782if(progress > pack_to_stdout) 783 progress_state =start_progress(_("Writing objects"), nr_result); 784ALLOC_ARRAY(written_list, to_pack.nr_objects); 785 write_order =compute_write_order(); 786 787do{ 788unsigned char sha1[20]; 789char*pack_tmp_name = NULL; 790 791if(pack_to_stdout) 792 f =sha1fd_throughput(1,"<stdout>", progress_state); 793else 794 f =create_tmp_packfile(&pack_tmp_name); 795 796 offset =write_pack_header(f, nr_remaining); 797 798if(reuse_packfile) { 799 off_t packfile_size; 800assert(pack_to_stdout); 801 802 packfile_size =write_reused_pack(f); 803 offset += packfile_size; 804} 805 806 nr_written =0; 807for(; i < to_pack.nr_objects; i++) { 808struct object_entry *e = write_order[i]; 809if(write_one(f, e, &offset) == WRITE_ONE_BREAK) 810break; 811display_progress(progress_state, written); 812} 813 814/* 815 * Did we write the wrong # entries in the header? 
816 * If so, rewrite it like in fast-import 817 */ 818if(pack_to_stdout) { 819sha1close(f, sha1, CSUM_CLOSE); 820}else if(nr_written == nr_remaining) { 821sha1close(f, sha1, CSUM_FSYNC); 822}else{ 823int fd =sha1close(f, sha1,0); 824fixup_pack_header_footer(fd, sha1, pack_tmp_name, 825 nr_written, sha1, offset); 826close(fd); 827if(write_bitmap_index) { 828warning(_(no_split_warning)); 829 write_bitmap_index =0; 830} 831} 832 833if(!pack_to_stdout) { 834struct stat st; 835struct strbuf tmpname = STRBUF_INIT; 836 837/* 838 * Packs are runtime accessed in their mtime 839 * order since newer packs are more likely to contain 840 * younger objects. So if we are creating multiple 841 * packs then we should modify the mtime of later ones 842 * to preserve this property. 843 */ 844if(stat(pack_tmp_name, &st) <0) { 845warning_errno("failed to stat%s", pack_tmp_name); 846}else if(!last_mtime) { 847 last_mtime = st.st_mtime; 848}else{ 849struct utimbuf utb; 850 utb.actime = st.st_atime; 851 utb.modtime = --last_mtime; 852if(utime(pack_tmp_name, &utb) <0) 853warning_errno("failed utime() on%s", pack_tmp_name); 854} 855 856strbuf_addf(&tmpname,"%s-", base_name); 857 858if(write_bitmap_index) { 859bitmap_writer_set_checksum(sha1); 860bitmap_writer_build_type_index(written_list, nr_written); 861} 862 863finish_tmp_packfile(&tmpname, pack_tmp_name, 864 written_list, nr_written, 865&pack_idx_opts, sha1); 866 867if(write_bitmap_index) { 868strbuf_addf(&tmpname,"%s.bitmap",sha1_to_hex(sha1)); 869 870stop_progress(&progress_state); 871 872bitmap_writer_show_progress(progress); 873bitmap_writer_reuse_bitmaps(&to_pack); 874bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1); 875bitmap_writer_build(&to_pack); 876bitmap_writer_finish(written_list, nr_written, 877 tmpname.buf, write_bitmap_options); 878 write_bitmap_index =0; 879} 880 881strbuf_release(&tmpname); 882free(pack_tmp_name); 883puts(sha1_to_hex(sha1)); 884} 885 886/* mark written objects as written to previous pack */ 887for(j =0; j < nr_written; j++) { 888 written_list[j]->offset = (off_t)-1; 889} 890 nr_remaining -= nr_written; 891}while(nr_remaining && i < to_pack.nr_objects); 892 893free(written_list); 894free(write_order); 895stop_progress(&progress_state); 896if(written != nr_result) 897die("wrote %"PRIu32" objects while expecting %"PRIu32, 898 written, nr_result); 899} 900 901static voidsetup_delta_attr_check(struct git_attr_check *check) 902{ 903static struct git_attr *attr_delta; 904 905if(!attr_delta) 906 attr_delta =git_attr("delta"); 907 908 check[0].attr = attr_delta; 909} 910 911static intno_try_delta(const char*path) 912{ 913struct git_attr_check check[1]; 914 915setup_delta_attr_check(check); 916if(git_check_attr(path,ARRAY_SIZE(check), check)) 917return0; 918if(ATTR_FALSE(check->value)) 919return1; 920return0; 921} 922 923/* 924 * When adding an object, check whether we have already added it 925 * to our packing list. If so, we can skip. However, if we are 926 * being asked to excludei t, but the previous mention was to include 927 * it, make sure to adjust its flags and tweak our numbers accordingly. 928 * 929 * As an optimization, we pass out the index position where we would have 930 * found the item, since that saves us from having to look it up again a 931 * few lines later when we want to add the new entry. 
932 */ 933static inthave_duplicate_entry(const unsigned char*sha1, 934int exclude, 935uint32_t*index_pos) 936{ 937struct object_entry *entry; 938 939 entry =packlist_find(&to_pack, sha1, index_pos); 940if(!entry) 941return0; 942 943if(exclude) { 944if(!entry->preferred_base) 945 nr_result--; 946 entry->preferred_base =1; 947} 948 949return1; 950} 951 952static intwant_found_object(int exclude,struct packed_git *p) 953{ 954if(exclude) 955return1; 956if(incremental) 957return0; 958 959/* 960 * When asked to do --local (do not include an object that appears in a 961 * pack we borrow from elsewhere) or --honor-pack-keep (do not include 962 * an object that appears in a pack marked with .keep), finding a pack 963 * that matches the criteria is sufficient for us to decide to omit it. 964 * However, even if this pack does not satisfy the criteria, we need to 965 * make sure no copy of this object appears in _any_ pack that makes us 966 * to omit the object, so we need to check all the packs. 967 * 968 * We can however first check whether these options can possible matter; 969 * if they do not matter we know we want the object in generated pack. 970 * Otherwise, we signal "-1" at the end to tell the caller that we do 971 * not know either way, and it needs to check more packs. 972 */ 973if(!ignore_packed_keep && 974(!local || !have_non_local_packs)) 975return1; 976 977if(local && !p->pack_local) 978return0; 979if(ignore_packed_keep && p->pack_local && p->pack_keep) 980return0; 981 982/* we don't know yet; keep looking for more packs */ 983return-1; 984} 985 986/* 987 * Check whether we want the object in the pack (e.g., we do not want 988 * objects found in non-local stores if the "--local" option was used). 989 * 990 * If the caller already knows an existing pack it wants to take the object 991 * from, that is passed in *found_pack and *found_offset; otherwise this 992 * function finds if there is any pack that has the object and returns the pack 993 * and its offset in these variables. 
994 */ 995static intwant_object_in_pack(const unsigned char*sha1, 996int exclude, 997struct packed_git **found_pack, 998 off_t *found_offset) 999{1000struct mru_entry *entry;1001int want;10021003if(!exclude && local &&has_loose_object_nonlocal(sha1))1004return0;10051006/*1007 * If we already know the pack object lives in, start checks from that1008 * pack - in the usual case when neither --local was given nor .keep files1009 * are present we will determine the answer right now.1010 */1011if(*found_pack) {1012 want =want_found_object(exclude, *found_pack);1013if(want != -1)1014return want;1015}10161017for(entry = packed_git_mru->head; entry; entry = entry->next) {1018struct packed_git *p = entry->item;1019 off_t offset;10201021if(p == *found_pack)1022 offset = *found_offset;1023else1024 offset =find_pack_entry_one(sha1, p);10251026if(offset) {1027if(!*found_pack) {1028if(!is_pack_valid(p))1029continue;1030*found_offset = offset;1031*found_pack = p;1032}1033 want =want_found_object(exclude, p);1034if(!exclude && want >0)1035mru_mark(packed_git_mru, entry);1036if(want != -1)1037return want;1038}1039}10401041return1;1042}10431044static voidcreate_object_entry(const unsigned char*sha1,1045enum object_type type,1046uint32_t hash,1047int exclude,1048int no_try_delta,1049uint32_t index_pos,1050struct packed_git *found_pack,1051 off_t found_offset)1052{1053struct object_entry *entry;10541055 entry =packlist_alloc(&to_pack, sha1, index_pos);1056 entry->hash = hash;1057if(type)1058 entry->type = type;1059if(exclude)1060 entry->preferred_base =1;1061else1062 nr_result++;1063if(found_pack) {1064 entry->in_pack = found_pack;1065 entry->in_pack_offset = found_offset;1066}10671068 entry->no_try_delta = no_try_delta;1069}10701071static const char no_closure_warning[] =N_(1072"disabling bitmap writing, as some objects are not being packed"1073);10741075static intadd_object_entry(const unsigned char*sha1,enum object_type type,1076const char*name,int exclude)1077{1078struct packed_git *found_pack = NULL;1079 off_t found_offset =0;1080uint32_t index_pos;10811082if(have_duplicate_entry(sha1, exclude, &index_pos))1083return0;10841085if(!want_object_in_pack(sha1, exclude, &found_pack, &found_offset)) {1086/* The pack is missing an object, so it will not have closure */1087if(write_bitmap_index) {1088warning(_(no_closure_warning));1089 write_bitmap_index =0;1090}1091return0;1092}10931094create_object_entry(sha1, type,pack_name_hash(name),1095 exclude, name &&no_try_delta(name),1096 index_pos, found_pack, found_offset);10971098display_progress(progress_state, nr_result);1099return1;1100}11011102static intadd_object_entry_from_bitmap(const unsigned char*sha1,1103enum object_type type,1104int flags,uint32_t name_hash,1105struct packed_git *pack, off_t offset)1106{1107uint32_t index_pos;11081109if(have_duplicate_entry(sha1,0, &index_pos))1110return0;11111112if(!want_object_in_pack(sha1,0, &pack, &offset))1113return0;11141115create_object_entry(sha1, type, name_hash,0,0, index_pos, pack, offset);11161117display_progress(progress_state, nr_result);1118return1;1119}11201121struct pbase_tree_cache {1122unsigned char sha1[20];1123int ref;1124int temporary;1125void*tree_data;1126unsigned long tree_size;1127};11281129static struct pbase_tree_cache *(pbase_tree_cache[256]);1130static intpbase_tree_cache_ix(const unsigned char*sha1)1131{1132return sha1[0] %ARRAY_SIZE(pbase_tree_cache);1133}1134static intpbase_tree_cache_ix_incr(int ix)1135{1136return(ix+1) %ARRAY_SIZE(pbase_tree_cache);1137}11381139static struct pbase_tree 
{1140struct pbase_tree *next;1141/* This is a phony "cache" entry; we are not1142 * going to evict it or find it through _get()1143 * mechanism -- this is for the toplevel node that1144 * would almost always change with any commit.1145 */1146struct pbase_tree_cache pcache;1147} *pbase_tree;11481149static struct pbase_tree_cache *pbase_tree_get(const unsigned char*sha1)1150{1151struct pbase_tree_cache *ent, *nent;1152void*data;1153unsigned long size;1154enum object_type type;1155int neigh;1156int my_ix =pbase_tree_cache_ix(sha1);1157int available_ix = -1;11581159/* pbase-tree-cache acts as a limited hashtable.1160 * your object will be found at your index or within a few1161 * slots after that slot if it is cached.1162 */1163for(neigh =0; neigh <8; neigh++) {1164 ent = pbase_tree_cache[my_ix];1165if(ent && !hashcmp(ent->sha1, sha1)) {1166 ent->ref++;1167return ent;1168}1169else if(((available_ix <0) && (!ent || !ent->ref)) ||1170((0<= available_ix) &&1171(!ent && pbase_tree_cache[available_ix])))1172 available_ix = my_ix;1173if(!ent)1174break;1175 my_ix =pbase_tree_cache_ix_incr(my_ix);1176}11771178/* Did not find one. Either we got a bogus request or1179 * we need to read and perhaps cache.1180 */1181 data =read_sha1_file(sha1, &type, &size);1182if(!data)1183return NULL;1184if(type != OBJ_TREE) {1185free(data);1186return NULL;1187}11881189/* We need to either cache or return a throwaway copy */11901191if(available_ix <0)1192 ent = NULL;1193else{1194 ent = pbase_tree_cache[available_ix];1195 my_ix = available_ix;1196}11971198if(!ent) {1199 nent =xmalloc(sizeof(*nent));1200 nent->temporary = (available_ix <0);1201}1202else{1203/* evict and reuse */1204free(ent->tree_data);1205 nent = ent;1206}1207hashcpy(nent->sha1, sha1);1208 nent->tree_data = data;1209 nent->tree_size = size;1210 nent->ref =1;1211if(!nent->temporary)1212 pbase_tree_cache[my_ix] = nent;1213return nent;1214}12151216static voidpbase_tree_put(struct pbase_tree_cache *cache)1217{1218if(!cache->temporary) {1219 cache->ref--;1220return;1221}1222free(cache->tree_data);1223free(cache);1224}12251226static intname_cmp_len(const char*name)1227{1228int i;1229for(i =0; name[i] && name[i] !='\n'&& name[i] !='/'; i++)1230;1231return i;1232}12331234static voidadd_pbase_object(struct tree_desc *tree,1235const char*name,1236int cmplen,1237const char*fullname)1238{1239struct name_entry entry;1240int cmp;12411242while(tree_entry(tree,&entry)) {1243if(S_ISGITLINK(entry.mode))1244continue;1245 cmp =tree_entry_len(&entry) != cmplen ?1:1246memcmp(name, entry.path, cmplen);1247if(cmp >0)1248continue;1249if(cmp <0)1250return;1251if(name[cmplen] !='/') {1252add_object_entry(entry.oid->hash,1253object_type(entry.mode),1254 fullname,1);1255return;1256}1257if(S_ISDIR(entry.mode)) {1258struct tree_desc sub;1259struct pbase_tree_cache *tree;1260const char*down = name+cmplen+1;1261int downlen =name_cmp_len(down);12621263 tree =pbase_tree_get(entry.oid->hash);1264if(!tree)1265return;1266init_tree_desc(&sub, tree->tree_data, tree->tree_size);12671268add_pbase_object(&sub, down, downlen, fullname);1269pbase_tree_put(tree);1270}1271}1272}12731274static unsigned*done_pbase_paths;1275static int done_pbase_paths_num;1276static int done_pbase_paths_alloc;1277static intdone_pbase_path_pos(unsigned hash)1278{1279int lo =0;1280int hi = done_pbase_paths_num;1281while(lo < hi) {1282int mi = (hi + lo) /2;1283if(done_pbase_paths[mi] == hash)1284return mi;1285if(done_pbase_paths[mi] < hash)1286 hi = mi;1287else1288 lo = mi +1;1289}1290return-lo-1;1291}12921293static 
intcheck_pbase_path(unsigned hash)1294{1295int pos = (!done_pbase_paths) ? -1:done_pbase_path_pos(hash);1296if(0<= pos)1297return1;1298 pos = -pos -1;1299ALLOC_GROW(done_pbase_paths,1300 done_pbase_paths_num +1,1301 done_pbase_paths_alloc);1302 done_pbase_paths_num++;1303if(pos < done_pbase_paths_num)1304memmove(done_pbase_paths + pos +1,1305 done_pbase_paths + pos,1306(done_pbase_paths_num - pos -1) *sizeof(unsigned));1307 done_pbase_paths[pos] = hash;1308return0;1309}13101311static voidadd_preferred_base_object(const char*name)1312{1313struct pbase_tree *it;1314int cmplen;1315unsigned hash =pack_name_hash(name);13161317if(!num_preferred_base ||check_pbase_path(hash))1318return;13191320 cmplen =name_cmp_len(name);1321for(it = pbase_tree; it; it = it->next) {1322if(cmplen ==0) {1323add_object_entry(it->pcache.sha1, OBJ_TREE, NULL,1);1324}1325else{1326struct tree_desc tree;1327init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);1328add_pbase_object(&tree, name, cmplen, name);1329}1330}1331}13321333static voidadd_preferred_base(unsigned char*sha1)1334{1335struct pbase_tree *it;1336void*data;1337unsigned long size;1338unsigned char tree_sha1[20];13391340if(window <= num_preferred_base++)1341return;13421343 data =read_object_with_reference(sha1, tree_type, &size, tree_sha1);1344if(!data)1345return;13461347for(it = pbase_tree; it; it = it->next) {1348if(!hashcmp(it->pcache.sha1, tree_sha1)) {1349free(data);1350return;1351}1352}13531354 it =xcalloc(1,sizeof(*it));1355 it->next = pbase_tree;1356 pbase_tree = it;13571358hashcpy(it->pcache.sha1, tree_sha1);1359 it->pcache.tree_data = data;1360 it->pcache.tree_size = size;1361}13621363static voidcleanup_preferred_base(void)1364{1365struct pbase_tree *it;1366unsigned i;13671368 it = pbase_tree;1369 pbase_tree = NULL;1370while(it) {1371struct pbase_tree *this= it;1372 it =this->next;1373free(this->pcache.tree_data);1374free(this);1375}13761377for(i =0; i <ARRAY_SIZE(pbase_tree_cache); i++) {1378if(!pbase_tree_cache[i])1379continue;1380free(pbase_tree_cache[i]->tree_data);1381free(pbase_tree_cache[i]);1382 pbase_tree_cache[i] = NULL;1383}13841385free(done_pbase_paths);1386 done_pbase_paths = NULL;1387 done_pbase_paths_num = done_pbase_paths_alloc =0;1388}13891390static voidcheck_object(struct object_entry *entry)1391{1392if(entry->in_pack) {1393struct packed_git *p = entry->in_pack;1394struct pack_window *w_curs = NULL;1395const unsigned char*base_ref = NULL;1396struct object_entry *base_entry;1397unsigned long used, used_0;1398unsigned long avail;1399 off_t ofs;1400unsigned char*buf, c;14011402 buf =use_pack(p, &w_curs, entry->in_pack_offset, &avail);14031404/*1405 * We want in_pack_type even if we do not reuse delta1406 * since non-delta representations could still be reused.1407 */1408 used =unpack_object_header_buffer(buf, avail,1409&entry->in_pack_type,1410&entry->size);1411if(used ==0)1412goto give_up;14131414/*1415 * Determine if this is a delta and if so whether we can1416 * reuse it or not. Otherwise let's find out as cheaply as1417 * possible what the actual type and size for this object is.1418 */1419switch(entry->in_pack_type) {1420default:1421/* Not a delta hence we've already got all we need. 
*/1422 entry->type = entry->in_pack_type;1423 entry->in_pack_header_size = used;1424if(entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)1425goto give_up;1426unuse_pack(&w_curs);1427return;1428case OBJ_REF_DELTA:1429if(reuse_delta && !entry->preferred_base)1430 base_ref =use_pack(p, &w_curs,1431 entry->in_pack_offset + used, NULL);1432 entry->in_pack_header_size = used +20;1433break;1434case OBJ_OFS_DELTA:1435 buf =use_pack(p, &w_curs,1436 entry->in_pack_offset + used, NULL);1437 used_0 =0;1438 c = buf[used_0++];1439 ofs = c &127;1440while(c &128) {1441 ofs +=1;1442if(!ofs ||MSB(ofs,7)) {1443error("delta base offset overflow in pack for%s",1444sha1_to_hex(entry->idx.sha1));1445goto give_up;1446}1447 c = buf[used_0++];1448 ofs = (ofs <<7) + (c &127);1449}1450 ofs = entry->in_pack_offset - ofs;1451if(ofs <=0|| ofs >= entry->in_pack_offset) {1452error("delta base offset out of bound for%s",1453sha1_to_hex(entry->idx.sha1));1454goto give_up;1455}1456if(reuse_delta && !entry->preferred_base) {1457struct revindex_entry *revidx;1458 revidx =find_pack_revindex(p, ofs);1459if(!revidx)1460goto give_up;1461 base_ref =nth_packed_object_sha1(p, revidx->nr);1462}1463 entry->in_pack_header_size = used + used_0;1464break;1465}14661467if(base_ref && (base_entry =packlist_find(&to_pack, base_ref, NULL))) {1468/*1469 * If base_ref was set above that means we wish to1470 * reuse delta data, and we even found that base1471 * in the list of objects we want to pack. Goodie!1472 *1473 * Depth value does not matter - find_deltas() will1474 * never consider reused delta as the base object to1475 * deltify other objects against, in order to avoid1476 * circular deltas.1477 */1478 entry->type = entry->in_pack_type;1479 entry->delta = base_entry;1480 entry->delta_size = entry->size;1481 entry->delta_sibling = base_entry->delta_child;1482 base_entry->delta_child = entry;1483unuse_pack(&w_curs);1484return;1485}14861487if(entry->type) {1488/*1489 * This must be a delta and we already know what the1490 * final object type is. Let's extract the actual1491 * object size from the delta header.1492 */1493 entry->size =get_size_from_delta(p, &w_curs,1494 entry->in_pack_offset + entry->in_pack_header_size);1495if(entry->size ==0)1496goto give_up;1497unuse_pack(&w_curs);1498return;1499}15001501/*1502 * No choice but to fall back to the recursive delta walk1503 * with sha1_object_info() to find about the object type1504 * at this point...1505 */1506 give_up:1507unuse_pack(&w_curs);1508}15091510 entry->type =sha1_object_info(entry->idx.sha1, &entry->size);1511/*1512 * The error condition is checked in prepare_pack(). This is1513 * to permit a missing preferred base object to be ignored1514 * as a preferred base. Doing so can result in a larger1515 * pack file, but the transfer will still take place.1516 */1517}15181519static intpack_offset_sort(const void*_a,const void*_b)1520{1521const struct object_entry *a = *(struct object_entry **)_a;1522const struct object_entry *b = *(struct object_entry **)_b;15231524/* avoid filesystem trashing with loose objects */1525if(!a->in_pack && !b->in_pack)1526returnhashcmp(a->idx.sha1, b->idx.sha1);15271528if(a->in_pack < b->in_pack)1529return-1;1530if(a->in_pack > b->in_pack)1531return1;1532return a->in_pack_offset < b->in_pack_offset ? -1:1533(a->in_pack_offset > b->in_pack_offset);1534}15351536/*1537 * Drop an on-disk delta we were planning to reuse. Naively, this would1538 * just involve blanking out the "delta" field, but we have to deal1539 * with some extra book-keeping:1540 *1541 * 1. 
Removing ourselves from the delta_sibling linked list.1542 *1543 * 2. Updating our size/type to the non-delta representation. These were1544 * either not recorded initially (size) or overwritten with the delta type1545 * (type) when check_object() decided to reuse the delta.1546 */1547static voiddrop_reused_delta(struct object_entry *entry)1548{1549struct object_entry **p = &entry->delta->delta_child;1550struct object_info oi = OBJECT_INFO_INIT;15511552while(*p) {1553if(*p == entry)1554*p = (*p)->delta_sibling;1555else1556 p = &(*p)->delta_sibling;1557}1558 entry->delta = NULL;15591560 oi.sizep = &entry->size;1561 oi.typep = &entry->type;1562if(packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) <0) {1563/*1564 * We failed to get the info from this pack for some reason;1565 * fall back to sha1_object_info, which may find another copy.1566 * And if that fails, the error will be recorded in entry->type1567 * and dealt with in prepare_pack().1568 */1569 entry->type =sha1_object_info(entry->idx.sha1, &entry->size);1570}1571}15721573/*1574 * Follow the chain of deltas from this entry onward, throwing away any links1575 * that cause us to hit a cycle (as determined by the DFS state flags in1576 * the entries).1577 */1578static voidbreak_delta_chains(struct object_entry *entry)1579{1580/* If it's not a delta, it can't be part of a cycle. */1581if(!entry->delta) {1582 entry->dfs_state = DFS_DONE;1583return;1584}15851586switch(entry->dfs_state) {1587case DFS_NONE:1588/*1589 * This is the first time we've seen the object. We mark it as1590 * part of the active potential cycle and recurse.1591 */1592 entry->dfs_state = DFS_ACTIVE;1593break_delta_chains(entry->delta);1594 entry->dfs_state = DFS_DONE;1595break;15961597case DFS_DONE:1598/* object already examined, and not part of a cycle */1599break;16001601case DFS_ACTIVE:1602/*1603 * We found a cycle that needs broken. It would be correct to1604 * break any link in the chain, but it's convenient to1605 * break this one.1606 */1607drop_reused_delta(entry);1608 entry->dfs_state = DFS_DONE;1609break;1610}1611}16121613static voidget_object_details(void)1614{1615uint32_t i;1616struct object_entry **sorted_by_offset;16171618 sorted_by_offset =xcalloc(to_pack.nr_objects,sizeof(struct object_entry *));1619for(i =0; i < to_pack.nr_objects; i++)1620 sorted_by_offset[i] = to_pack.objects + i;1621QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);16221623for(i =0; i < to_pack.nr_objects; i++) {1624struct object_entry *entry = sorted_by_offset[i];1625check_object(entry);1626if(big_file_threshold < entry->size)1627 entry->no_try_delta =1;1628}16291630/*1631 * This must happen in a second pass, since we rely on the delta1632 * information for the whole list being completed.1633 */1634for(i =0; i < to_pack.nr_objects; i++)1635break_delta_chains(&to_pack.objects[i]);16361637free(sorted_by_offset);1638}16391640/*1641 * We search for deltas in a list sorted by type, by filename hash, and then1642 * by size, so that we see progressively smaller and smaller files.1643 * That's because we prefer deltas to be from the bigger file1644 * to the smaller -- deletes are potentially cheaper, but perhaps1645 * more importantly, the bigger file is likely the more recent1646 * one. 
The deepest deltas are therefore the oldest objects which are1647 * less susceptible to be accessed often.1648 */1649static inttype_size_sort(const void*_a,const void*_b)1650{1651const struct object_entry *a = *(struct object_entry **)_a;1652const struct object_entry *b = *(struct object_entry **)_b;16531654if(a->type > b->type)1655return-1;1656if(a->type < b->type)1657return1;1658if(a->hash > b->hash)1659return-1;1660if(a->hash < b->hash)1661return1;1662if(a->preferred_base > b->preferred_base)1663return-1;1664if(a->preferred_base < b->preferred_base)1665return1;1666if(a->size > b->size)1667return-1;1668if(a->size < b->size)1669return1;1670return a < b ? -1: (a > b);/* newest first */1671}16721673struct unpacked {1674struct object_entry *entry;1675void*data;1676struct delta_index *index;1677unsigned depth;1678};16791680static intdelta_cacheable(unsigned long src_size,unsigned long trg_size,1681unsigned long delta_size)1682{1683if(max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)1684return0;16851686if(delta_size < cache_max_small_delta_size)1687return1;16881689/* cache delta, if objects are large enough compared to delta size */1690if((src_size >>20) + (trg_size >>21) > (delta_size >>10))1691return1;16921693return0;1694}16951696#ifndef NO_PTHREADS16971698static pthread_mutex_t read_mutex;1699#define read_lock() pthread_mutex_lock(&read_mutex)1700#define read_unlock() pthread_mutex_unlock(&read_mutex)17011702static pthread_mutex_t cache_mutex;1703#define cache_lock() pthread_mutex_lock(&cache_mutex)1704#define cache_unlock() pthread_mutex_unlock(&cache_mutex)17051706static pthread_mutex_t progress_mutex;1707#define progress_lock() pthread_mutex_lock(&progress_mutex)1708#define progress_unlock() pthread_mutex_unlock(&progress_mutex)17091710#else17111712#define read_lock() (void)01713#define read_unlock() (void)01714#define cache_lock() (void)01715#define cache_unlock() (void)01716#define progress_lock() (void)01717#define progress_unlock() (void)017181719#endif17201721static inttry_delta(struct unpacked *trg,struct unpacked *src,1722unsigned max_depth,unsigned long*mem_usage)1723{1724struct object_entry *trg_entry = trg->entry;1725struct object_entry *src_entry = src->entry;1726unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;1727unsigned ref_depth;1728enum object_type type;1729void*delta_buf;17301731/* Don't bother doing diffs between different types */1732if(trg_entry->type != src_entry->type)1733return-1;17341735/*1736 * We do not bother to try a delta that we discarded on an1737 * earlier try, but only when reusing delta data. Note that1738 * src_entry that is marked as the preferred_base should always1739 * be considered, as even if we produce a suboptimal delta against1740 * it, we will still save the transfer cost, as we already know1741 * the other side has it and we won't send src_entry at all.1742 */1743if(reuse_delta && trg_entry->in_pack &&1744 trg_entry->in_pack == src_entry->in_pack &&1745!src_entry->preferred_base &&1746 trg_entry->in_pack_type != OBJ_REF_DELTA &&1747 trg_entry->in_pack_type != OBJ_OFS_DELTA)1748return0;17491750/* Let's not bust the allowed depth. */1751if(src->depth >= max_depth)1752return0;17531754/* Now some size filtering heuristics. 
*/1755 trg_size = trg_entry->size;1756if(!trg_entry->delta) {1757 max_size = trg_size/2-20;1758 ref_depth =1;1759}else{1760 max_size = trg_entry->delta_size;1761 ref_depth = trg->depth;1762}1763 max_size = (uint64_t)max_size * (max_depth - src->depth) /1764(max_depth - ref_depth +1);1765if(max_size ==0)1766return0;1767 src_size = src_entry->size;1768 sizediff = src_size < trg_size ? trg_size - src_size :0;1769if(sizediff >= max_size)1770return0;1771if(trg_size < src_size /32)1772return0;17731774/* Load data if not already done */1775if(!trg->data) {1776read_lock();1777 trg->data =read_sha1_file(trg_entry->idx.sha1, &type, &sz);1778read_unlock();1779if(!trg->data)1780die("object%scannot be read",1781sha1_to_hex(trg_entry->idx.sha1));1782if(sz != trg_size)1783die("object%sinconsistent object length (%lu vs%lu)",1784sha1_to_hex(trg_entry->idx.sha1), sz, trg_size);1785*mem_usage += sz;1786}1787if(!src->data) {1788read_lock();1789 src->data =read_sha1_file(src_entry->idx.sha1, &type, &sz);1790read_unlock();1791if(!src->data) {1792if(src_entry->preferred_base) {1793static int warned =0;1794if(!warned++)1795warning("object%scannot be read",1796sha1_to_hex(src_entry->idx.sha1));1797/*1798 * Those objects are not included in the1799 * resulting pack. Be resilient and ignore1800 * them if they can't be read, in case the1801 * pack could be created nevertheless.1802 */1803return0;1804}1805die("object%scannot be read",1806sha1_to_hex(src_entry->idx.sha1));1807}1808if(sz != src_size)1809die("object%sinconsistent object length (%lu vs%lu)",1810sha1_to_hex(src_entry->idx.sha1), sz, src_size);1811*mem_usage += sz;1812}1813if(!src->index) {1814 src->index =create_delta_index(src->data, src_size);1815if(!src->index) {1816static int warned =0;1817if(!warned++)1818warning("suboptimal pack - out of memory");1819return0;1820}1821*mem_usage +=sizeof_delta_index(src->index);1822}18231824 delta_buf =create_delta(src->index, trg->data, trg_size, &delta_size, max_size);1825if(!delta_buf)1826return0;18271828if(trg_entry->delta) {1829/* Prefer only shallower same-sized deltas. */1830if(delta_size == trg_entry->delta_size &&1831 src->depth +1>= trg->depth) {1832free(delta_buf);1833return0;1834}1835}18361837/*1838 * Handle memory allocation outside of the cache1839 * accounting lock. 
Compiler will optimize the strangeness1840 * away when NO_PTHREADS is defined.1841 */1842free(trg_entry->delta_data);1843cache_lock();1844if(trg_entry->delta_data) {1845 delta_cache_size -= trg_entry->delta_size;1846 trg_entry->delta_data = NULL;1847}1848if(delta_cacheable(src_size, trg_size, delta_size)) {1849 delta_cache_size += delta_size;1850cache_unlock();1851 trg_entry->delta_data =xrealloc(delta_buf, delta_size);1852}else{1853cache_unlock();1854free(delta_buf);1855}18561857 trg_entry->delta = src_entry;1858 trg_entry->delta_size = delta_size;1859 trg->depth = src->depth +1;18601861return1;1862}18631864static unsigned intcheck_delta_limit(struct object_entry *me,unsigned int n)1865{1866struct object_entry *child = me->delta_child;1867unsigned int m = n;1868while(child) {1869unsigned int c =check_delta_limit(child, n +1);1870if(m < c)1871 m = c;1872 child = child->delta_sibling;1873}1874return m;1875}18761877static unsigned longfree_unpacked(struct unpacked *n)1878{1879unsigned long freed_mem =sizeof_delta_index(n->index);1880free_delta_index(n->index);1881 n->index = NULL;1882if(n->data) {1883 freed_mem += n->entry->size;1884free(n->data);1885 n->data = NULL;1886}1887 n->entry = NULL;1888 n->depth =0;1889return freed_mem;1890}18911892static voidfind_deltas(struct object_entry **list,unsigned*list_size,1893int window,int depth,unsigned*processed)1894{1895uint32_t i, idx =0, count =0;1896struct unpacked *array;1897unsigned long mem_usage =0;18981899 array =xcalloc(window,sizeof(struct unpacked));19001901for(;;) {1902struct object_entry *entry;1903struct unpacked *n = array + idx;1904int j, max_depth, best_base = -1;19051906progress_lock();1907if(!*list_size) {1908progress_unlock();1909break;1910}1911 entry = *list++;1912(*list_size)--;1913if(!entry->preferred_base) {1914(*processed)++;1915display_progress(progress_state, *processed);1916}1917progress_unlock();19181919 mem_usage -=free_unpacked(n);1920 n->entry = entry;19211922while(window_memory_limit &&1923 mem_usage > window_memory_limit &&1924 count >1) {1925uint32_t tail = (idx + window - count) % window;1926 mem_usage -=free_unpacked(array + tail);1927 count--;1928}19291930/* We do not compute delta to *create* objects we are not1931 * going to pack.1932 */1933if(entry->preferred_base)1934goto next;19351936/*1937 * If the current object is at pack edge, take the depth the1938 * objects that depend on the current object into account1939 * otherwise they would become too deep.1940 */1941 max_depth = depth;1942if(entry->delta_child) {1943 max_depth -=check_delta_limit(entry,0);1944if(max_depth <=0)1945goto next;1946}19471948 j = window;1949while(--j >0) {1950int ret;1951uint32_t other_idx = idx + j;1952struct unpacked *m;1953if(other_idx >= window)1954 other_idx -= window;1955 m = array + other_idx;1956if(!m->entry)1957break;1958 ret =try_delta(n, m, max_depth, &mem_usage);1959if(ret <0)1960break;1961else if(ret >0)1962 best_base = other_idx;1963}19641965/*1966 * If we decided to cache the delta data, then it is best1967 * to compress it right away. 
First because we have to do1968 * it anyway, and doing it here while we're threaded will1969 * save a lot of time in the non threaded write phase,1970 * as well as allow for caching more deltas within1971 * the same cache size limit.1972 * ...1973 * But only if not writing to stdout, since in that case1974 * the network is most likely throttling writes anyway,1975 * and therefore it is best to go to the write phase ASAP1976 * instead, as we can afford spending more time compressing1977 * between writes at that moment.1978 */1979if(entry->delta_data && !pack_to_stdout) {1980 entry->z_delta_size =do_compress(&entry->delta_data,1981 entry->delta_size);1982cache_lock();1983 delta_cache_size -= entry->delta_size;1984 delta_cache_size += entry->z_delta_size;1985cache_unlock();1986}19871988/* if we made n a delta, and if n is already at max1989 * depth, leaving it in the window is pointless. we1990 * should evict it first.1991 */1992if(entry->delta && max_depth <= n->depth)1993continue;19941995/*1996 * Move the best delta base up in the window, after the1997 * currently deltified object, to keep it longer. It will1998 * be the first base object to be attempted next.1999 */2000if(entry->delta) {2001struct unpacked swap = array[best_base];2002int dist = (window + idx - best_base) % window;2003int dst = best_base;2004while(dist--) {2005int src = (dst +1) % window;2006 array[dst] = array[src];2007 dst = src;2008}2009 array[dst] = swap;2010}20112012 next:2013 idx++;2014if(count +1< window)2015 count++;2016if(idx >= window)2017 idx =0;2018}20192020for(i =0; i < window; ++i) {2021free_delta_index(array[i].index);2022free(array[i].data);2023}2024free(array);2025}20262027#ifndef NO_PTHREADS20282029static voidtry_to_free_from_threads(size_t size)2030{2031read_lock();2032release_pack_memory(size);2033read_unlock();2034}20352036static try_to_free_t old_try_to_free_routine;20372038/*2039 * The main thread waits on the condition that (at least) one of the workers2040 * has stopped working (which is indicated in the .working member of2041 * struct thread_params).2042 * When a work thread has completed its work, it sets .working to 0 and2043 * signals the main thread and waits on the condition that .data_ready2044 * becomes 1.2045 */20462047struct thread_params {2048 pthread_t thread;2049struct object_entry **list;2050unsigned list_size;2051unsigned remaining;2052int window;2053int depth;2054int working;2055int data_ready;2056 pthread_mutex_t mutex;2057 pthread_cond_t cond;2058unsigned*processed;2059};20602061static pthread_cond_t progress_cond;20622063/*2064 * Mutex and conditional variable can't be statically-initialized on Windows.2065 */2066static voidinit_threaded_search(void)2067{2068init_recursive_mutex(&read_mutex);2069pthread_mutex_init(&cache_mutex, NULL);2070pthread_mutex_init(&progress_mutex, NULL);2071pthread_cond_init(&progress_cond, NULL);2072 old_try_to_free_routine =set_try_to_free_routine(try_to_free_from_threads);2073}20742075static voidcleanup_threaded_search(void)2076{2077set_try_to_free_routine(old_try_to_free_routine);2078pthread_cond_destroy(&progress_cond);2079pthread_mutex_destroy(&read_mutex);2080pthread_mutex_destroy(&cache_mutex);2081pthread_mutex_destroy(&progress_mutex);2082}20832084static void*threaded_find_deltas(void*arg)2085{2086struct thread_params *me = arg;20872088while(me->remaining) {2089find_deltas(me->list, &me->remaining,2090 me->window, me->depth, me->processed);20912092progress_lock();2093 me->working 
		me->working = 0;
		pthread_cond_signal(&progress_cond);
		progress_unlock();

		/*
		 * We must not set ->data_ready before we wait on the
		 * condition because the main thread may have set it to 1
		 * before we get here.  In order to be sure that new
		 * work is available if we see 1 in ->data_ready, it
		 * was initialized to 0 before this thread was spawned
		 * and we reset it to 0 right away.
		 */
		pthread_mutex_lock(&me->mutex);
		while (!me->data_ready)
			pthread_cond_wait(&me->cond, &me->mutex);
		me->data_ready = 0;
		pthread_mutex_unlock(&me->mutex);
	}
	/* leave ->working 1 so that this doesn't get more work assigned */
	return NULL;
}

static void ll_find_deltas(struct object_entry **list, unsigned list_size,
			   int window, int depth, unsigned *processed)
{
	struct thread_params *p;
	int i, ret, active_threads = 0;

	init_threaded_search();

	if (delta_search_threads <= 1) {
		find_deltas(list, &list_size, window, depth, processed);
		cleanup_threaded_search();
		return;
	}
	if (progress > pack_to_stdout)
		fprintf(stderr, "Delta compression using up to %d threads.\n",
			delta_search_threads);
	p = xcalloc(delta_search_threads, sizeof(*p));

	/* Partition the work amongst work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		unsigned sub_size = list_size / (delta_search_threads - i);

		/* don't use too small segments or no deltas will be found */
		if (sub_size < 2 * window && i + 1 < delta_search_threads)
			sub_size = 0;

		p[i].window = window;
		p[i].depth = depth;
		p[i].processed = processed;
		p[i].working = 1;
		p[i].data_ready = 0;

		/* try to split chunks on "path" boundaries */
		while (sub_size && sub_size < list_size &&
		       list[sub_size]->hash &&
		       list[sub_size]->hash == list[sub_size - 1]->hash)
			sub_size++;

		p[i].list = list;
		p[i].list_size = sub_size;
		p[i].remaining = sub_size;

		list += sub_size;
		list_size -= sub_size;
	}

	/* Start work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		if (!p[i].list_size)
			continue;
		pthread_mutex_init(&p[i].mutex, NULL);
		pthread_cond_init(&p[i].cond, NULL);
		ret = pthread_create(&p[i].thread, NULL,
				     threaded_find_deltas, &p[i]);
		if (ret)
			die("unable to create thread: %s", strerror(ret));
		active_threads++;
	}

	/*
	 * Now let's wait for work completion.  Each time a thread is done
	 * with its work, we steal half of the remaining work from the
	 * thread with the largest number of unprocessed objects and give
	 * it to that newly idle thread.
	 * This ensures good load balancing until the remaining object list
	 * segments are simply too short to be worth splitting anymore.
	 */
	while (active_threads) {
		struct thread_params *target = NULL;
		struct thread_params *victim = NULL;
		unsigned sub_size = 0;

		progress_lock();
		for (;;) {
			for (i = 0; !target && i < delta_search_threads; i++)
				if (!p[i].working)
					target = &p[i];
			if (target)
				break;
			pthread_cond_wait(&progress_cond, &progress_mutex);
		}

		for (i = 0; i < delta_search_threads; i++)
			if (p[i].remaining > 2 * window &&
			    (!victim || victim->remaining < p[i].remaining))
				victim = &p[i];
		if (victim) {
			sub_size = victim->remaining / 2;
			list = victim->list + victim->list_size - sub_size;
			while (sub_size && list[0]->hash &&
			       list[0]->hash == list[-1]->hash) {
				list++;
				sub_size--;
			}
			if (!sub_size) {
				/*
				 * It is possible for some "paths" to have
				 * so many objects that no hash boundary
				 * might be found.  Let's just steal the
				 * exact half in that case.
				 */
				sub_size = victim->remaining / 2;
				list -= sub_size;
			}
			target->list = list;
			victim->list_size -= sub_size;
			victim->remaining -= sub_size;
		}
		target->list_size = sub_size;
		target->remaining = sub_size;
		target->working = 1;
		progress_unlock();

		pthread_mutex_lock(&target->mutex);
		target->data_ready = 1;
		pthread_cond_signal(&target->cond);
		pthread_mutex_unlock(&target->mutex);

		if (!sub_size) {
			pthread_join(target->thread, NULL);
			pthread_cond_destroy(&target->cond);
			pthread_mutex_destroy(&target->mutex);
			active_threads--;
		}
	}
	cleanup_threaded_search();
	free(p);
}

#else
#define ll_find_deltas(l, s, w, d, p)	find_deltas(l, &s, w, d, p)
#endif

static void add_tag_chain(const struct object_id *oid)
{
	struct tag *tag;

	/*
	 * We catch duplicates already in add_object_entry(), but we'd
	 * prefer to do this extra check to avoid having to parse the
	 * tag at all if we already know that it's being packed (e.g., if
	 * it was included via bitmaps, we would not have parsed it
	 * previously).
	 */
	if (packlist_find(&to_pack, oid->hash, NULL))
		return;

	tag = lookup_tag(oid->hash);
	while (1) {
		if (!tag || parse_tag(tag) || !tag->tagged)
			die("unable to pack objects reachable from tag %s",
			    oid_to_hex(oid));

		add_object_entry(tag->object.oid.hash, OBJ_TAG, NULL, 0);

		if (tag->tagged->type != OBJ_TAG)
			return;

		tag = (struct tag *)tag->tagged;
	}
}

static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
{
	struct object_id peeled;

	if (starts_with(path, "refs/tags/") && /* is a tag? */
	    !peel_ref(path, peeled.hash)    && /* peelable? */
	    packlist_find(&to_pack, peeled.hash, NULL)) /* object packed? */
		add_tag_chain(oid);
	return 0;
}
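/*
 * Example: for a nested tag chain
 *
 *     refs/tags/v1.0 -> tag A -> tag B -> commit C
 *
 * add_ref_tag() triggers only when C, the peeled object, is already in
 * to_pack; add_tag_chain() then walks A and B so that every tag object
 * in the chain ends up in the output pack as well.
 */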
static void prepare_pack(int window, int depth)
{
	struct object_entry **delta_list;
	uint32_t i, nr_deltas;
	unsigned n;

	get_object_details();

	/*
	 * If we're locally repacking then we need to be doubly careful
	 * from now on in order to make sure no stealth corruption gets
	 * propagated to the new pack.  Clients receiving streamed packs
	 * should validate everything they get anyway so no need to incur
	 * the additional cost here in that case.
	 */
	if (!pack_to_stdout)
		do_check_packed_object_crc = 1;

	if (!to_pack.nr_objects || !window || !depth)
		return;

	ALLOC_ARRAY(delta_list, to_pack.nr_objects);
	nr_deltas = n = 0;

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = to_pack.objects + i;

		if (entry->delta)
			/*
			 * This happens if we decided to reuse an existing
			 * delta from a pack.  "reuse_delta &&" is implied.
			 */
			continue;

		if (entry->size < 50)
			continue;

		if (entry->no_try_delta)
			continue;

		if (!entry->preferred_base) {
			nr_deltas++;
			if (entry->type < 0)
				die("unable to get type of object %s",
				    sha1_to_hex(entry->idx.sha1));
		} else {
			if (entry->type < 0) {
				/*
				 * This object is not found, but we
				 * don't have to include it anyway.
				 */
				continue;
			}
		}

		delta_list[n++] = entry;
	}

	if (nr_deltas && n > 1) {
		unsigned nr_done = 0;
		if (progress)
			progress_state = start_progress(_("Compressing objects"),
							nr_deltas);
		QSORT(delta_list, n, type_size_sort);
		ll_find_deltas(delta_list, n, window + 1, depth, &nr_done);
		stop_progress(&progress_state);
		if (nr_done != nr_deltas)
			die("inconsistency with delta count");
	}
	free(delta_list);
}

static int git_pack_config(const char *k, const char *v, void *cb)
{
	if (!strcmp(k, "pack.window")) {
		window = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.windowmemory")) {
		window_memory_limit = git_config_ulong(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.depth")) {
		depth = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachesize")) {
		max_delta_cache_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachelimit")) {
		cache_max_small_delta_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.writebitmaphashcache")) {
		if (git_config_bool(k, v))
			write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
		else
			write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
	}
	if (!strcmp(k, "pack.usebitmaps")) {
		use_bitmap_index_default = git_config_bool(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.threads")) {
		delta_search_threads = git_config_int(k, v);
		if (delta_search_threads < 0)
			die("invalid number of threads specified (%d)",
			    delta_search_threads);
#ifdef NO_PTHREADS
		if (delta_search_threads != 1)
			warning("no threads support, ignoring %s", k);
#endif
		return 0;
	}
	if (!strcmp(k, "pack.indexversion")) {
		pack_idx_opts.version = git_config_int(k, v);
		if (pack_idx_opts.version > 2)
			die("bad pack.indexversion=%"PRIu32,
			    pack_idx_opts.version);
		return 0;
	}
	return git_default_config(k, v, cb);
}
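/*
 * Object list lines on stdin look like:
 *
 *     <40-hex sha1> <name>    an object to pack
 *     -<40-hex sha1>          an edge object (preferred base only)
 *
 * The name starts at offset 41, right past the sha1 and its separator
 * (hence line + 41 below).
 */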
static void read_object_list_from_stdin(void)
{
	char line[40 + 1 + PATH_MAX + 2];
	unsigned char sha1[20];

	for (;;) {
		if (!fgets(line, sizeof(line), stdin)) {
			if (feof(stdin))
				break;
			if (!ferror(stdin))
				die("fgets returned NULL, not EOF, not error!");
			if (errno != EINTR)
				die_errno("fgets");
			clearerr(stdin);
			continue;
		}
		if (line[0] == '-') {
			if (get_sha1_hex(line + 1, sha1))
				die("expected edge sha1, got garbage:\n%s",
				    line);
			add_preferred_base(sha1);
			continue;
		}
		if (get_sha1_hex(line, sha1))
			die("expected sha1, got garbage:\n%s", line);

		add_preferred_base_object(line + 41);
		add_object_entry(sha1, 0, line + 41, 0);
	}
}

#define OBJECT_ADDED (1u<<20)

static void show_commit(struct commit *commit, void *data)
{
	add_object_entry(commit->object.oid.hash, OBJ_COMMIT, NULL, 0);
	commit->object.flags |= OBJECT_ADDED;

	if (write_bitmap_index)
		index_commit_for_bitmap(commit);
}

static void show_object(struct object *obj, const char *name, void *data)
{
	add_preferred_base_object(name);
	add_object_entry(obj->oid.hash, obj->type, name, 0);
	obj->flags |= OBJECT_ADDED;
}

static void show_edge(struct commit *commit)
{
	add_preferred_base(commit->object.oid.hash);
}

struct in_pack_object {
	off_t offset;
	struct object *object;
};

struct in_pack {
	int alloc;
	int nr;
	struct in_pack_object *array;
};

static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
{
	in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
	in_pack->array[in_pack->nr].object = object;
	in_pack->nr++;
}

/*
 * Compare the objects in the offset order, in order to emulate the
 * "git rev-list --objects" output that produced the pack originally.
 */
static int ofscmp(const void *a_, const void *b_)
{
	struct in_pack_object *a = (struct in_pack_object *)a_;
	struct in_pack_object *b = (struct in_pack_object *)b_;

	if (a->offset < b->offset)
		return -1;
	else if (a->offset > b->offset)
		return 1;
	else
		return oidcmp(&a->object->oid, &b->object->oid);
}

static void add_objects_in_unpacked_packs(struct rev_info *revs)
{
	struct packed_git *p;
	struct in_pack in_pack;
	uint32_t i;

	memset(&in_pack, 0, sizeof(in_pack));

	for (p = packed_git; p; p = p->next) {
		const unsigned char *sha1;
		struct object *o;

		if (!p->pack_local || p->pack_keep)
			continue;
		if (open_pack_index(p))
			die("cannot open pack index");

		ALLOC_GROW(in_pack.array,
			   in_pack.nr + p->num_objects,
			   in_pack.alloc);

		for (i = 0; i < p->num_objects; i++) {
			sha1 = nth_packed_object_sha1(p, i);
			o = lookup_unknown_object(sha1);
			if (!(o->flags & OBJECT_ADDED))
				mark_in_pack_object(o, p, &in_pack);
			o->flags |= OBJECT_ADDED;
		}
	}

	if (in_pack.nr) {
		QSORT(in_pack.array, in_pack.nr, ofscmp);
		for (i = 0; i < in_pack.nr; i++) {
			struct object *o = in_pack.array[i].object;
			add_object_entry(o->oid.hash, o->type, "", 0);
		}
	}
	free(in_pack.array);
}

static int add_loose_object(const unsigned char *sha1, const char *path,
			    void *data)
{
	enum object_type type = sha1_object_info(sha1, NULL);

	if (type < 0) {
		warning("loose object at %s could not be examined", path);
		return 0;
	}

	add_object_entry(sha1, type, "", 0);
	return 0;
}

/*
 * We actually don't even have to worry about reachability here.
 * add_object_entry will weed out duplicates, so we just add every
 * loose object we find.
 */
static void add_unreachable_loose_objects(void)
{
	for_each_loose_file_in_objdir(get_object_directory(),
				      add_loose_object,
				      NULL, NULL, NULL);
}
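/*
 * Does sha1 live in a pack we must leave alone (a .keep pack or a
 * non-local one)?  last_found remembers the pack that matched last
 * time, since consecutive queries tend to hit the same pack; the
 * sentinel value (void *)1 means "no pack cached yet".
 */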
static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
{
	static struct packed_git *last_found = (void *)1;
	struct packed_git *p;

	p = (last_found != (void *)1) ? last_found : packed_git;

	while (p) {
		if ((!p->pack_local || p->pack_keep) &&
		    find_pack_entry_one(sha1, p)) {
			last_found = p;
			return 1;
		}
		if (p == last_found)
			p = packed_git;
		else
			p = p->next;
		if (p == last_found)
			p = p->next;
	}
	return 0;
}

/*
 * Store a list of sha1s that should not be discarded because they
 * are either written too recently, or are reachable from another
 * object that was.
 *
 * This is filled by get_object_list.
 */
static struct sha1_array recent_objects;

static int loosened_object_can_be_discarded(const unsigned char *sha1,
					    unsigned long mtime)
{
	if (!unpack_unreachable_expiration)
		return 0;
	if (mtime > unpack_unreachable_expiration)
		return 0;
	if (sha1_array_lookup(&recent_objects, sha1) >= 0)
		return 0;
	return 1;
}

static void loosen_unused_packed_objects(struct rev_info *revs)
{
	struct packed_git *p;
	uint32_t i;
	const unsigned char *sha1;

	for (p = packed_git; p; p = p->next) {
		if (!p->pack_local || p->pack_keep)
			continue;

		if (open_pack_index(p))
			die("cannot open pack index");

		for (i = 0; i < p->num_objects; i++) {
			sha1 = nth_packed_object_sha1(p, i);
			if (!packlist_find(&to_pack, sha1, NULL) &&
			    !has_sha1_pack_kept_or_nonlocal(sha1) &&
			    !loosened_object_can_be_discarded(sha1, p->mtime))
				if (force_object_loose(sha1, p->mtime))
					die("unable to force loose object");
		}
	}
}

/*
 * This tracks any options which pack-reuse code expects to be on, or which a
 * reader of the pack might not understand, and which would therefore prevent
 * blind reuse of what we have on disk.
 */
static int pack_options_allow_reuse(void)
{
	return pack_to_stdout && allow_ofs_delta;
}

static int get_object_list_from_bitmap(struct rev_info *revs)
{
	if (prepare_bitmap_walk(revs) < 0)
		return -1;

	if (pack_options_allow_reuse() &&
	    !reuse_partial_packfile_from_bitmap(
			&reuse_packfile,
			&reuse_packfile_objects,
			&reuse_packfile_offset)) {
		assert(reuse_packfile_objects);
		nr_result += reuse_packfile_objects;
		display_progress(progress_state, nr_result);
	}

	traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
	return 0;
}

static void record_recent_object(struct object *obj,
				 const char *name,
				 void *data)
{
	sha1_array_append(&recent_objects, obj->oid.hash);
}

static void record_recent_commit(struct commit *commit, void *data)
{
	sha1_array_append(&recent_objects, commit->object.oid.hash);
}
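/*
 * Read rev-list style arguments from stdin, one per line, until an
 * empty line or EOF, then run the object walk.  "--not" and
 * "--shallow <sha1>" are the only option-looking lines accepted.
 */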
static void get_object_list(int ac, const char **av)
{
	struct rev_info revs;
	char line[1000];
	int flags = 0;

	init_revisions(&revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	/* make sure shallows are read */
	is_repository_shallow();

	while (fgets(line, sizeof(line), stdin) != NULL) {
		int len = strlen(line);
		if (len && line[len - 1] == '\n')
			line[--len] = 0;
		if (!len)
			break;
		if (*line == '-') {
			if (!strcmp(line, "--not")) {
				flags ^= UNINTERESTING;
				write_bitmap_index = 0;
				continue;
			}
			if (starts_with(line, "--shallow ")) {
				unsigned char sha1[20];
				if (get_sha1_hex(line + 10, sha1))
					die("not an SHA-1 '%s'", line + 10);
				register_shallow(sha1);
				use_bitmap_index = 0;
				continue;
			}
			die("not a rev '%s'", line);
		}
		if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
			die("bad revision '%s'", line);
	}

	if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
		return;

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	mark_edges_uninteresting(&revs, show_edge);
	traverse_commit_list(&revs, show_commit, show_object, NULL);

	if (unpack_unreachable_expiration) {
		revs.ignore_missing_links = 1;
		if (add_unseen_recent_objects_to_traversal(&revs,
				unpack_unreachable_expiration))
			die("unable to add recent objects");
		if (prepare_revision_walk(&revs))
			die("revision walk setup failed");
		traverse_commit_list(&revs, record_recent_commit,
				     record_recent_object, NULL);
	}

	if (keep_unreachable)
		add_objects_in_unpacked_packs(&revs);
	if (pack_loose_unreachable)
		add_unreachable_loose_objects();
	if (unpack_unreachable)
		loosen_unused_packed_objects(&revs);

	sha1_array_clear(&recent_objects);
}

static int option_parse_index_version(const struct option *opt,
				      const char *arg, int unset)
{
	char *c;
	const char *val = arg;
	pack_idx_opts.version = strtoul(val, &c, 10);
	if (pack_idx_opts.version > 2)
		die(_("unsupported index version %s"), val);
	if (*c == ',' && c[1])
		pack_idx_opts.off32_limit = strtoul(c + 1, &c, 0);
	if (*c || pack_idx_opts.off32_limit & 0x80000000)
		die(_("bad index version '%s'"), val);
	return 0;
}

static int option_parse_unpack_unreachable(const struct option *opt,
					   const char *arg, int unset)
{
	if (unset) {
		unpack_unreachable = 0;
		unpack_unreachable_expiration = 0;
	} else {
		unpack_unreachable = 1;
		if (arg)
			unpack_unreachable_expiration = approxidate(arg);
	}
	return 0;
}

int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
	int use_internal_rev_list = 0;
	int thin = 0;
	int shallow = 0;
	int all_progress_implied = 0;
	struct argv_array rp = ARGV_ARRAY_INIT;
	int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
	int rev_list_index = 0;
	struct option pack_objects_options[] = {
		OPT_SET_INT('q', "quiet", &progress,
			    N_("do not show progress meter"), 0),
		OPT_SET_INT(0, "progress", &progress,
			    N_("show progress meter"), 1),
		OPT_SET_INT(0, "all-progress", &progress,
			    N_("show progress meter during object writing phase"), 2),
		OPT_BOOL(0, "all-progress-implied",
			 &all_progress_implied,
			 N_("similar to --all-progress when progress meter is shown")),
		{ OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
		  N_("write the pack index file in the specified idx format version"),
		  0, option_parse_index_version },
		OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
			      N_("maximum size of each output pack file")),
		OPT_BOOL(0, "local", &local,
			 N_("ignore borrowed objects from alternate object store")),
		OPT_BOOL(0, "incremental", &incremental,
			 N_("ignore packed objects")),
		OPT_INTEGER(0, "window", &window,
			    N_("limit pack window by objects")),
		OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
			      N_("limit pack window by memory in addition to object limit")),
		OPT_INTEGER(0, "depth", &depth,
			    N_("maximum length of delta chain allowed in the resulting pack")),
		OPT_BOOL(0, "reuse-delta", &reuse_delta,
			 N_("reuse existing deltas")),
		OPT_BOOL(0, "reuse-object", &reuse_object,
			 N_("reuse existing objects")),
		OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
			 N_("use OFS_DELTA objects")),
		OPT_INTEGER(0, "threads", &delta_search_threads,
			    N_("use threads when searching for best delta matches")),
		OPT_BOOL(0, "non-empty", &non_empty,
			 N_("do not create an empty pack output")),
		OPT_BOOL(0, "revs", &use_internal_rev_list,
			 N_("read revision arguments from standard input")),
		{ OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
		  N_("limit the objects to those that are not yet packed"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
		  N_("include objects reachable from any reference"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
		  N_("include objects referred by reflog entries"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
		  N_("include objects referred to by the index"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		OPT_BOOL(0, "stdout", &pack_to_stdout,
			 N_("output pack to stdout")),
		OPT_BOOL(0, "include-tag", &include_tag,
			 N_("include tag objects that refer to objects to be packed")),
		OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
			 N_("keep unreachable objects")),
		OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
			 N_("pack loose unreachable objects")),
		{ OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
		  N_("unpack unreachable objects newer than <time>"),
		  PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
		OPT_BOOL(0, "thin", &thin,
			 N_("create thin packs")),
		OPT_BOOL(0, "shallow", &shallow,
			 N_("create packs suitable for shallow fetches")),
		OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
			 N_("ignore packs that have companion .keep file")),
		OPT_INTEGER(0, "compression", &pack_compression_level,
			    N_("pack compression level")),
		OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
			    N_("do not hide commits by grafts"), 0),
		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
			 N_("use a bitmap index if available to speed up counting objects")),
		OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
			 N_("write a bitmap index together with the pack index")),
		OPT_END(),
	};

	check_replace_refs = 0;

	reset_pack_idx_option(&pack_idx_opts);
	git_config(git_pack_config, NULL);

	progress = isatty(2);
	argc = parse_options(argc, argv, prefix, pack_objects_options,
			     pack_usage, 0);

	if (argc) {
		base_name = argv[0];
		argc--;
	}
	if (pack_to_stdout != !base_name || argc)
		usage_with_options(pack_usage, pack_objects_options);

	argv_array_push(&rp, "pack-objects");
	if (thin) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, shallow
				? "--objects-edge-aggressive"
				: "--objects-edge");
	} else
		argv_array_push(&rp, "--objects");

	if (rev_list_all) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--all");
	}
	if (rev_list_reflog) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--reflog");
	}
	if (rev_list_index) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--indexed-objects");
	}
	if (rev_list_unpacked) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--unpacked");
	}

	if (!reuse_object)
		reuse_delta = 0;
	if (pack_compression_level == -1)
		pack_compression_level = Z_DEFAULT_COMPRESSION;
	else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
		die("bad pack compression level %d", pack_compression_level);

	if (!delta_search_threads)	/* --threads=0 means autodetect */
		delta_search_threads = online_cpus();

#ifdef NO_PTHREADS
	if (delta_search_threads != 1)
		warning("no threads support, ignoring --threads");
#endif
	if (!pack_to_stdout && !pack_size_limit)
		pack_size_limit = pack_size_limit_cfg;
	if (pack_to_stdout && pack_size_limit)
		die("--max-pack-size cannot be used to build a pack for transfer.");
	if (pack_size_limit && pack_size_limit < 1024*1024) {
		warning("minimum pack size limit is 1 MiB");
		pack_size_limit = 1024*1024;
	}

	if (!pack_to_stdout && thin)
		die("--thin cannot be used to build an indexable pack.");

	if (keep_unreachable && unpack_unreachable)
		die("--keep-unreachable and --unpack-unreachable are incompatible.");
	if (!rev_list_all || !rev_list_reflog || !rev_list_index)
		unpack_unreachable_expiration = 0;

	/*
	 * "soft" reasons not to use bitmaps - for on-disk repack by default we want
	 *
	 * - to produce a good pack (with a bitmap index, not-yet-packed objects
	 *   are packed in suboptimal order).
	 *
	 * - to use a more robust pack-generation codepath (avoiding possible
	 *   bugs in the bitmap code and possible bitmap index corruption).
	 */
	if (!pack_to_stdout)
		use_bitmap_index_default = 0;

	if (use_bitmap_index < 0)
		use_bitmap_index = use_bitmap_index_default;

	/* "hard" reasons not to use bitmaps; these just won't work at all */
	if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
		use_bitmap_index = 0;

	if (pack_to_stdout || !rev_list_all)
		write_bitmap_index = 0;

	if (progress && all_progress_implied)
		progress = 2;

	prepare_packed_git();
	if (ignore_packed_keep) {
		struct packed_git *p;
		for (p = packed_git; p; p = p->next)
			if (p->pack_local && p->pack_keep)
				break;
		if (!p) /* no keep-able packs found */
			ignore_packed_keep = 0;
	}
	if (local) {
		/*
		 * unlike ignore_packed_keep above, we do not want to
		 * unset "local" based on looking at packs, as it
		 * also covers non-local objects
		 */
		struct packed_git *p;
		for (p = packed_git; p; p = p->next) {
			if (!p->pack_local) {
				have_non_local_packs = 1;
				break;
			}
		}
	}

	if (progress)
		progress_state = start_progress(_("Counting objects"), 0);
	if (!use_internal_rev_list)
		read_object_list_from_stdin();
	else {
		get_object_list(rp.argc, rp.argv);
		argv_array_clear(&rp);
	}
	cleanup_preferred_base();
	if (include_tag && nr_result)
		for_each_ref(add_ref_tag, NULL);
	stop_progress(&progress_state);

	if (non_empty && !nr_result)
		return 0;
	if (nr_result)
		prepare_pack(window, depth);
	write_pack_file();
	if (progress)
		fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
			" reused %"PRIu32" (delta %"PRIu32")\n",
			written, written_delta, reused, reused_delta);
	return 0;
}