1#include"builtin.h" 2#include"cache.h" 3#include"config.h" 4#include"attr.h" 5#include"object.h" 6#include"blob.h" 7#include"commit.h" 8#include"tag.h" 9#include"tree.h" 10#include"delta.h" 11#include"pack.h" 12#include"pack-revindex.h" 13#include"csum-file.h" 14#include"tree-walk.h" 15#include"diff.h" 16#include"revision.h" 17#include"list-objects.h" 18#include"pack-objects.h" 19#include"progress.h" 20#include"refs.h" 21#include"streaming.h" 22#include"thread-utils.h" 23#include"pack-bitmap.h" 24#include"reachable.h" 25#include"sha1-array.h" 26#include"argv-array.h" 27#include"mru.h" 28#include"packfile.h" 29 30static const char*pack_usage[] = { 31N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 32N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 33 NULL 34}; 35 36/* 37 * Objects we are going to pack are collected in the `to_pack` structure. 38 * It contains an array (dynamically expanded) of the object data, and a map 39 * that can resolve SHA1s to their position in the array. 40 */ 41static struct packing_data to_pack; 42 43static struct pack_idx_entry **written_list; 44static uint32_t nr_result, nr_written; 45 46static int non_empty; 47static int reuse_delta =1, reuse_object =1; 48static int keep_unreachable, unpack_unreachable, include_tag; 49static timestamp_t unpack_unreachable_expiration; 50static int pack_loose_unreachable; 51static int local; 52static int have_non_local_packs; 53static int incremental; 54static int ignore_packed_keep; 55static int allow_ofs_delta; 56static struct pack_idx_option pack_idx_opts; 57static const char*base_name; 58static int progress =1; 59static int window =10; 60static unsigned long pack_size_limit; 61static int depth =50; 62static int delta_search_threads; 63static int pack_to_stdout; 64static int num_preferred_base; 65static struct progress *progress_state; 66 67static struct packed_git *reuse_packfile; 68static uint32_t reuse_packfile_objects; 69static off_t reuse_packfile_offset; 70 71static int use_bitmap_index_default =1; 72static int use_bitmap_index = -1; 73static int write_bitmap_index; 74static uint16_t write_bitmap_options; 75 76static unsigned long delta_cache_size =0; 77static unsigned long max_delta_cache_size =256*1024*1024; 78static unsigned long cache_max_small_delta_size =1000; 79 80static unsigned long window_memory_limit =0; 81 82/* 83 * stats 84 */ 85static uint32_t written, written_delta; 86static uint32_t reused, reused_delta; 87 88/* 89 * Indexed commits 90 */ 91static struct commit **indexed_commits; 92static unsigned int indexed_commits_nr; 93static unsigned int indexed_commits_alloc; 94 95static voidindex_commit_for_bitmap(struct commit *commit) 96{ 97if(indexed_commits_nr >= indexed_commits_alloc) { 98 indexed_commits_alloc = (indexed_commits_alloc +32) *2; 99REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 100} 101 102 indexed_commits[indexed_commits_nr++] = commit; 103} 104 105static void*get_delta(struct object_entry *entry) 106{ 107unsigned long size, base_size, delta_size; 108void*buf, *base_buf, *delta_buf; 109enum object_type type; 110 111 buf =read_sha1_file(entry->idx.oid.hash, &type, &size); 112if(!buf) 113die("unable to read%s",oid_to_hex(&entry->idx.oid)); 114 base_buf =read_sha1_file(entry->delta->idx.oid.hash, &type, 115&base_size); 116if(!base_buf) 117die("unable to read%s", 118oid_to_hex(&entry->delta->idx.oid)); 119 delta_buf =diff_delta(base_buf, base_size, 120 buf, size, &delta_size,0); 121if(!delta_buf || delta_size != 
static unsigned long do_compress(void **pptr, unsigned long size)
{
	git_zstream stream;
	void *in, *out;
	unsigned long maxsize;

	git_deflate_init(&stream, pack_compression_level);
	maxsize = git_deflate_bound(&stream, size);

	in = *pptr;
	out = xmalloc(maxsize);
	*pptr = out;

	stream.next_in = in;
	stream.avail_in = size;
	stream.next_out = out;
	stream.avail_out = maxsize;
	while (git_deflate(&stream, Z_FINISH) == Z_OK)
		; /* nothing */
	git_deflate_end(&stream);

	free(in);
	return stream.total_out;
}

static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
					   const unsigned char *sha1)
{
	git_zstream stream;
	unsigned char ibuf[1024 * 16];
	unsigned char obuf[1024 * 16];
	unsigned long olen = 0;

	git_deflate_init(&stream, pack_compression_level);

	for (;;) {
		ssize_t readlen;
		int zret = Z_OK;
		readlen = read_istream(st, ibuf, sizeof(ibuf));
		if (readlen == -1)
			die(_("unable to read %s"), sha1_to_hex(sha1));

		stream.next_in = ibuf;
		stream.avail_in = readlen;
		while ((stream.avail_in || readlen == 0) &&
		       (zret == Z_OK || zret == Z_BUF_ERROR)) {
			stream.next_out = obuf;
			stream.avail_out = sizeof(obuf);
			zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
			sha1write(f, obuf, stream.next_out - obuf);
			olen += stream.next_out - obuf;
		}
		if (stream.avail_in)
			die(_("deflate error (%d)"), zret);
		if (readlen == 0) {
			if (zret != Z_STREAM_END)
				die(_("deflate error (%d)"), zret);
			break;
		}
	}
	git_deflate_end(&stream);
	return olen;
}

/*
 * we are going to reuse the existing object data as is.  make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len,
		unsigned long expect)
{
	git_zstream stream;
	unsigned char fakebuf[4096], *in;
	int st;

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	do {
		in = use_pack(p, w_curs, offset, &stream.avail_in);
		stream.next_in = in;
		stream.next_out = fakebuf;
		stream.avail_out = sizeof(fakebuf);
		st = git_inflate(&stream, Z_FINISH);
		offset += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	git_inflate_end(&stream);
	return (st == Z_STREAM_END &&
		stream.total_out == expect &&
		stream.total_in == len) ? 0 : -1;
}

static void copy_pack_data(struct sha1file *f,
		struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len)
{
	unsigned char *in;
	unsigned long avail;

	while (len) {
		in = use_pack(p, w_curs, offset, &avail);
		if (avail > len)
			avail = (unsigned long)len;
		sha1write(f, in, avail);
		offset += avail;
		len -= avail;
	}
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
					   unsigned long limit, int usable_delta)
{
	unsigned long size, datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;
	enum object_type type;
	void *buf;
	struct git_istream *st = NULL;

	if (!usable_delta) {
		if (entry->type == OBJ_BLOB &&
		    entry->size > big_file_threshold &&
		    (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL)
			buf = NULL;
		else {
			buf = read_sha1_file(entry->idx.oid.hash, &type,
					     &size);
			if (!buf)
				die(_("unable to read %s"),
				    oid_to_hex(&entry->idx.oid));
		}
		/*
		 * make sure no cached delta data remains from a
		 * previous attempt before a pack split occurred.
		 */
		FREE_AND_NULL(entry->delta_data);
		entry->z_delta_size = 0;
	} else if (entry->delta_data) {
		size = entry->delta_size;
		buf = entry->delta_data;
		entry->delta_data = NULL;
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	} else {
		buf = get_delta(entry);
		size = entry->delta_size;
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	}

	if (st)	/* large blob case, just assume we don't compress well */
		datalen = size;
	else if (entry->z_delta_size)
		datalen = entry->z_delta_size;
	else
		datalen = do_compress(&buf, size);

	/*
	 * The object header is a byte of 'type' followed by zero or
	 * more bytes of length.
	 */
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, size);

	if (type == OBJ_OFS_DELTA) {
		/*
		 * Deltas with relative base contain an additional
		 * encoding of the relative offset for the delta
		 * base from this object's position in the pack.
		 */
		off_t ofs = entry->idx.offset - entry->delta->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
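		/*
		 * The offset is stored base-128, most significant part
		 * first, with bit 7 set on every byte but the last and
		 * one subtracted per continuation step (e.g. an offset
		 * of 300 is emitted as the two bytes 0x81 0x2c).
		 */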
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
	} else if (type == OBJ_REF_DELTA) {
		/*
		 * Deltas with a base reference contain
		 * an additional 20 bytes for the base sha1.
		 */
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, entry->delta->idx.oid.hash, 20);
		hdrlen += 20;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
	}
	if (st) {
		datalen = write_large_blob_data(st, f, entry->idx.oid.hash);
		close_istream(st);
	} else {
		sha1write(f, buf, datalen);
		free(buf);
	}

	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
				unsigned long limit, int usable_delta)
{
	struct packed_git *p = entry->in_pack;
	struct pack_window *w_curs = NULL;
	struct revindex_entry *revidx;
	off_t offset;
	enum object_type type = entry->type;
	off_t datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;

	if (entry->delta)
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, entry->size);

	offset = entry->in_pack_offset;
	revidx = find_pack_revindex(p, offset);
	datalen = revidx[1].offset - offset;
	if (!pack_to_stdout && p->index_version > 1 &&
	    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
		error("bad packed object CRC for %s",
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	offset += entry->in_pack_header_size;
	datalen -= entry->in_pack_header_size;

	if (!pack_to_stdout && p->index_version == 1 &&
	    check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
		error("corrupt packed object for %s",
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	if (type == OBJ_OFS_DELTA) {
		off_t ofs = entry->idx.offset - entry->delta->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
		reused_delta++;
	} else if (type == OBJ_REF_DELTA) {
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, entry->delta->idx.oid.hash, 20);
		hdrlen += 20;
		reused_delta++;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
	}
	copy_pack_data(f, p, &w_curs, offset, datalen);
	unuse_pack(&w_curs);
	reused++;
	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct sha1file *f,
			  struct object_entry *entry,
			  off_t write_offset)
{
	unsigned long limit;
	off_t len;
	int usable_delta, to_reuse;

	if (!pack_to_stdout)
		crc32_begin(f);

	/* apply size limit if limited packsize and not first object */
	if (!pack_size_limit || !nr_written)
		limit = 0;
	else if (pack_size_limit <= write_offset)
		/*
		 * the earlier object did not fit the limit; avoid
		 * mistaking this with unlimited (i.e. limit = 0).
		 */
		limit = 1;
	else
		limit = pack_size_limit - write_offset;

	if (!entry->delta)
		usable_delta = 0;	/* no delta */
	else if (!pack_size_limit)
		usable_delta = 1;	/* unlimited packfile */
	else if (entry->delta->idx.offset == (off_t)-1)
		usable_delta = 0;	/* base was written to another pack */
	else if (entry->delta->idx.offset)
		usable_delta = 1;	/* base already exists in this pack */
	else
		usable_delta = 0;	/* base could end up in another pack */

	if (!reuse_object)
		to_reuse = 0;	/* explicit */
	else if (!entry->in_pack)
		to_reuse = 0;	/* can't reuse what we don't have */
	else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
		/* check_object() decided it for us ... */
		to_reuse = usable_delta;
		/* ... but pack split may override that */
	else if (entry->type != entry->in_pack_type)
		to_reuse = 0;	/* pack has delta which is unusable */
	else if (entry->delta)
		to_reuse = 0;	/* we want to pack afresh */
	else
		to_reuse = 1;	/* we have it in-pack undeltified,
				 * and we do not need to deltify it.
				 */

	if (!to_reuse)
		len = write_no_reuse_object(f, entry, limit, usable_delta);
	else
		len = write_reuse_object(f, entry, limit, usable_delta);
	if (!len)
		return 0;

	if (usable_delta)
		written_delta++;
	written++;
	if (!pack_to_stdout)
		entry->idx.crc32 = crc32_end(f);
	return len;
}

enum write_one_status {
	WRITE_ONE_SKIP = -1, /* already written */
	WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
	WRITE_ONE_WRITTEN = 1, /* normal */
	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct sha1file *f,
				       struct object_entry *e,
				       off_t *offset)
{
	off_t size;
	int recursing;

	/*
	 * we set offset to 1 (which is an impossible value) to mark
	 * the fact that this object is involved in "write its base
	 * first before writing a deltified object" recursion.
	 */
	recursing = (e->idx.offset == 1);
	if (recursing) {
		warning("recursive delta detected for object %s",
			oid_to_hex(&e->idx.oid));
		return WRITE_ONE_RECURSIVE;
	} else if (e->idx.offset || e->preferred_base) {
		/* offset is non zero if object is written already. */
		return WRITE_ONE_SKIP;
	}

	/* if we are deltified, write out base object first. */
	if (e->delta) {
		e->idx.offset = 1; /* now recurse */
		switch (write_one(f, e->delta, offset)) {
		case WRITE_ONE_RECURSIVE:
			/* we cannot depend on this one */
			e->delta = NULL;
			break;
		default:
			break;
		case WRITE_ONE_BREAK:
			e->idx.offset = recursing;
			return WRITE_ONE_BREAK;
		}
	}

	e->idx.offset = *offset;
	size = write_object(f, e, *offset);
	if (!size) {
		e->idx.offset = recursing;
		return WRITE_ONE_BREAK;
	}
	written_list[nr_written++] = &e->idx;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(*offset, size))
		die("pack too large for current definition of off_t");
	*offset += size;
	return WRITE_ONE_WRITTEN;
}

static int mark_tagged(const char *path, const struct object_id *oid, int flag,
		       void *cb_data)
{
	unsigned char peeled[20];
	struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL);

	if (entry)
		entry->tagged = 1;
	if (!peel_ref(path, peeled)) {
		entry = packlist_find(&to_pack, peeled, NULL);
		if (entry)
			entry->tagged = 1;
	}
	return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	if (e->filled)
		return;
	wo[(*endp)++] = e;
	e->filled = 1;
}

static void add_descendants_to_write_order(struct object_entry **wo,
					   unsigned int *endp,
					   struct object_entry *e)
{
	int add_to_order = 1;
	while (e) {
		if (add_to_order) {
			struct object_entry *s;
			/* add this node... */
			add_to_write_order(wo, endp, e);
			/* all its siblings... */
			for (s = e->delta_sibling; s; s = s->delta_sibling) {
				add_to_write_order(wo, endp, s);
			}
		}
		/* drop down a level to add left subtree nodes if possible */
		if (e->delta_child) {
			add_to_order = 1;
			e = e->delta_child;
		} else {
			add_to_order = 0;
			/* our sibling might have some children, it is next */
			if (e->delta_sibling) {
				e = e->delta_sibling;
				continue;
			}
			/* go back to our parent node */
			e = e->delta;
			while (e && !e->delta_sibling) {
				/* we're on the right side of a subtree, keep
				 * going up until we can go right again */
				e = e->delta;
			}
			if (!e) {
				/* done - we hit our original root node */
				return;
			}
			/* pass it off to sibling at this level */
			e = e->delta_sibling;
		}
	};
}

static void add_family_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	struct object_entry *root;

	for (root = e; root->delta; root = root->delta)
		; /* nothing */
	add_descendants_to_write_order(wo, endp, root);
}

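/*
 * Lay out the objects for writing: first everything in the original
 * recency order up to the first tagged tip, then all tagged tips, then
 * the remaining commits and tags, then trees, and finally everything
 * else grouped with its whole delta family.
 */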
static struct object_entry **compute_write_order(void)
{
	unsigned int i, wo_end, last_untagged;

	struct object_entry **wo;
	struct object_entry *objects = to_pack.objects;

	for (i = 0; i < to_pack.nr_objects; i++) {
		objects[i].tagged = 0;
		objects[i].filled = 0;
		objects[i].delta_child = NULL;
		objects[i].delta_sibling = NULL;
	}

	/*
	 * Fully connect delta_child/delta_sibling network.
	 * Make sure delta_sibling is sorted in the original
	 * recency order.
	 */
	for (i = to_pack.nr_objects; i > 0;) {
		struct object_entry *e = &objects[--i];
		if (!e->delta)
			continue;
		/* Mark me as the first child */
		e->delta_sibling = e->delta->delta_child;
		e->delta->delta_child = e;
	}

	/*
	 * Mark objects that are at the tip of tags.
	 */
	for_each_tag_ref(mark_tagged, NULL);

	/*
	 * Give the objects in the original recency order until
	 * we see a tagged tip.
	 */
	ALLOC_ARRAY(wo, to_pack.nr_objects);
	for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			break;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}
	last_untagged = i;

	/*
	 * Then fill all the tagged tips.
	 */
	for (; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all remaining commits and tags.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (objects[i].type != OBJ_COMMIT &&
		    objects[i].type != OBJ_TAG)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all the trees.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (objects[i].type != OBJ_TREE)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * Finally all the rest in really tight order
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (!objects[i].filled)
			add_family_to_write_order(wo, &wo_end, &objects[i]);
	}

	if (wo_end != to_pack.nr_objects)
		die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);

	return wo;
}

static off_t write_reused_pack(struct sha1file *f)
{
	unsigned char buffer[8192];
	off_t to_write, total;
	int fd;

	if (!is_pack_valid(reuse_packfile))
		die("packfile is invalid: %s", reuse_packfile->pack_name);

	fd = git_open(reuse_packfile->pack_name);
	if (fd < 0)
		die_errno("unable to open packfile for reuse: %s",
			  reuse_packfile->pack_name);

	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
		die_errno("unable to seek in reused packfile");

	if (reuse_packfile_offset < 0)
		reuse_packfile_offset = reuse_packfile->pack_size - 20;

	total = to_write = reuse_packfile_offset - sizeof(struct pack_header);

	while (to_write) {
		int read_pack = xread(fd, buffer, sizeof(buffer));

		if (read_pack <= 0)
			die_errno("unable to read from reused packfile");

		if (read_pack > to_write)
			read_pack = to_write;

		sha1write(f, buffer, read_pack);
		to_write -= read_pack;

		/*
		 * We don't know the actual number of objects written,
		 * only how many bytes written, how many bytes total, and
		 * how many objects total. So we can fake it by pretending all
		 * objects we are writing are the same size. This gives us a
		 * smooth progress meter, and at the end it matches the true
		 * answer.
		 */
		written = reuse_packfile_objects *
				(((double)(total - to_write)) / total);
		display_progress(progress_state, written);
	}

	close(fd);
	written = reuse_packfile_objects;
	display_progress(progress_state, written);
	return reuse_packfile_offset - sizeof(struct pack_header);
}

static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);

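/*
 * Write out the actual pack(s): each iteration of the loop below
 * produces one packfile, stopping early when pack_size_limit would be
 * exceeded and fixing up the object count in the header afterwards.
 * Splitting is incompatible with bitmap writing, which is then disabled.
 */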
static void write_pack_file(void)
{
	uint32_t i = 0, j;
	struct sha1file *f;
	off_t offset;
	uint32_t nr_remaining = nr_result;
	time_t last_mtime = 0;
	struct object_entry **write_order;

	if (progress > pack_to_stdout)
		progress_state = start_progress(_("Writing objects"), nr_result);
	ALLOC_ARRAY(written_list, to_pack.nr_objects);
	write_order = compute_write_order();

	do {
		unsigned char sha1[20];
		char *pack_tmp_name = NULL;

		if (pack_to_stdout)
			f = sha1fd_throughput(1, "<stdout>", progress_state);
		else
			f = create_tmp_packfile(&pack_tmp_name);

		offset = write_pack_header(f, nr_remaining);

		if (reuse_packfile) {
			off_t packfile_size;
			assert(pack_to_stdout);

			packfile_size = write_reused_pack(f);
			offset += packfile_size;
		}

		nr_written = 0;
		for (; i < to_pack.nr_objects; i++) {
			struct object_entry *e = write_order[i];
			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
				break;
			display_progress(progress_state, written);
		}

		/*
		 * Did we write the wrong # entries in the header?
		 * If so, rewrite it like in fast-import
		 */
		if (pack_to_stdout) {
			sha1close(f, sha1, CSUM_CLOSE);
		} else if (nr_written == nr_remaining) {
			sha1close(f, sha1, CSUM_FSYNC);
		} else {
			int fd = sha1close(f, sha1, 0);
			fixup_pack_header_footer(fd, sha1, pack_tmp_name,
						 nr_written, sha1, offset);
			close(fd);
			if (write_bitmap_index) {
				warning(_(no_split_warning));
				write_bitmap_index = 0;
			}
		}

		if (!pack_to_stdout) {
			struct stat st;
			struct strbuf tmpname = STRBUF_INIT;

			/*
			 * Packs are runtime accessed in their mtime
			 * order since newer packs are more likely to contain
			 * younger objects.  So if we are creating multiple
			 * packs then we should modify the mtime of later ones
			 * to preserve this property.
			 */
			if (stat(pack_tmp_name, &st) < 0) {
				warning_errno("failed to stat %s", pack_tmp_name);
			} else if (!last_mtime) {
				last_mtime = st.st_mtime;
			} else {
				struct utimbuf utb;
				utb.actime = st.st_atime;
				utb.modtime = --last_mtime;
				if (utime(pack_tmp_name, &utb) < 0)
					warning_errno("failed utime() on %s", pack_tmp_name);
			}

			strbuf_addf(&tmpname, "%s-", base_name);

			if (write_bitmap_index) {
				bitmap_writer_set_checksum(sha1);
				bitmap_writer_build_type_index(written_list, nr_written);
			}

			finish_tmp_packfile(&tmpname, pack_tmp_name,
					    written_list, nr_written,
					    &pack_idx_opts, sha1);

			if (write_bitmap_index) {
				strbuf_addf(&tmpname, "%s.bitmap", sha1_to_hex(sha1));

				stop_progress(&progress_state);

				bitmap_writer_show_progress(progress);
				bitmap_writer_reuse_bitmaps(&to_pack);
				bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
				bitmap_writer_build(&to_pack);
				bitmap_writer_finish(written_list, nr_written,
						     tmpname.buf, write_bitmap_options);
				write_bitmap_index = 0;
			}

			strbuf_release(&tmpname);
			free(pack_tmp_name);
			puts(sha1_to_hex(sha1));
		}

		/* mark written objects as written to previous pack */
		for (j = 0; j < nr_written; j++) {
			written_list[j]->offset = (off_t)-1;
		}
		nr_remaining -= nr_written;
	} while (nr_remaining && i < to_pack.nr_objects);

	free(written_list);
	free(write_order);
	stop_progress(&progress_state);
	if (written != nr_result)
		die("wrote %"PRIu32" objects while expecting %"PRIu32,
		    written, nr_result);
}

static int no_try_delta(const char *path)
{
	static struct attr_check *check;

	if (!check)
		check = attr_check_initl("delta", NULL);
	if (git_check_attr(path, check))
		return 0;
	if (ATTR_FALSE(check->items[0].value))
		return 1;
	return 0;
}

/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 *
 * As an optimization, we pass out the index position where we would have
 * found the item, since that saves us from having to look it up again a
 * few lines later when we want to add the new entry.
 */
static int have_duplicate_entry(const unsigned char *sha1,
				int exclude,
				uint32_t *index_pos)
{
	struct object_entry *entry;

	entry = packlist_find(&to_pack, sha1, index_pos);
	if (!entry)
		return 0;

	if (exclude) {
		if (!entry->preferred_base)
			nr_result--;
		entry->preferred_base = 1;
	}

	return 1;
}

static int want_found_object(int exclude, struct packed_git *p)
{
	if (exclude)
		return 1;
	if (incremental)
		return 0;

	/*
	 * When asked to do --local (do not include an object that appears in a
	 * pack we borrow from elsewhere) or --honor-pack-keep (do not include
	 * an object that appears in a pack marked with .keep), finding a pack
	 * that matches the criteria is sufficient for us to decide to omit it.
	 * However, even if this pack does not satisfy the criteria, we need to
	 * make sure no copy of this object appears in _any_ pack that would
	 * make us omit the object, so we need to check all the packs.
	 *
	 * We can however first check whether these options can possibly matter;
	 * if they do not matter we know we want the object in the generated pack.
	 * Otherwise, we signal "-1" at the end to tell the caller that we do
	 * not know either way, and it needs to check more packs.
	 */
	if (!ignore_packed_keep &&
	    (!local || !have_non_local_packs))
		return 1;

	if (local && !p->pack_local)
		return 0;
	if (ignore_packed_keep && p->pack_local && p->pack_keep)
		return 0;

	/* we don't know yet; keep looking for more packs */
	return -1;
}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const unsigned char *sha1,
			       int exclude,
			       struct packed_git **found_pack,
			       off_t *found_offset)
{
	int want;
	struct list_head *pos;

	if (!exclude && local && has_loose_object_nonlocal(sha1))
		return 0;

	/*
	 * If we already know the pack object lives in, start checks from that
	 * pack - in the usual case when neither --local was given nor .keep files
	 * are present we will determine the answer right now.
	 */
	if (*found_pack) {
		want = want_found_object(exclude, *found_pack);
		if (want != -1)
			return want;
	}

	list_for_each(pos, &packed_git_mru.list) {
		struct mru *entry = list_entry(pos, struct mru, list);
		struct packed_git *p = entry->item;
		off_t offset;

		if (p == *found_pack)
			offset = *found_offset;
		else
			offset = find_pack_entry_one(sha1, p);

		if (offset) {
			if (!*found_pack) {
				if (!is_pack_valid(p))
					continue;
				*found_offset = offset;
				*found_pack = p;
			}
			want = want_found_object(exclude, p);
			if (!exclude && want > 0)
				mru_mark(&packed_git_mru, entry);
			if (want != -1)
				return want;
		}
	}

	return 1;
}

static void create_object_entry(const unsigned char *sha1,
				enum object_type type,
				uint32_t hash,
				int exclude,
				int no_try_delta,
				uint32_t index_pos,
				struct packed_git *found_pack,
				off_t found_offset)
{
	struct object_entry *entry;

	entry = packlist_alloc(&to_pack, sha1, index_pos);
	entry->hash = hash;
	if (type)
		entry->type = type;
	if (exclude)
		entry->preferred_base = 1;
	else
		nr_result++;
	if (found_pack) {
		entry->in_pack = found_pack;
		entry->in_pack_offset = found_offset;
	}

	entry->no_try_delta = no_try_delta;
}

static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);

static int add_object_entry(const unsigned char *sha1, enum object_type type,
			    const char *name, int exclude)
{
	struct packed_git *found_pack = NULL;
	off_t found_offset = 0;
	uint32_t index_pos;

	if (have_duplicate_entry(sha1, exclude, &index_pos))
		return 0;

	if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset)) {
		/* The pack is missing an object, so it will not have closure */
		if (write_bitmap_index) {
			warning(_(no_closure_warning));
			write_bitmap_index = 0;
		}
		return 0;
	}

	create_object_entry(sha1, type, pack_name_hash(name),
			    exclude, name && no_try_delta(name),
			    index_pos, found_pack, found_offset);

	display_progress(progress_state, nr_result);
	return 1;
}

static int add_object_entry_from_bitmap(const unsigned char *sha1,
					enum object_type type,
					int flags, uint32_t name_hash,
					struct packed_git *pack, off_t offset)
{
	uint32_t index_pos;

	if (have_duplicate_entry(sha1, 0, &index_pos))
		return 0;

	if (!want_object_in_pack(sha1, 0, &pack, &offset))
		return 0;

	create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset);

	display_progress(progress_state, nr_result);
	return 1;
}

struct pbase_tree_cache {
	unsigned char sha1[20];
	int ref;
	int temporary;
	void *tree_data;
	unsigned long tree_size;
};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const unsigned char *sha1)
{
	return sha1[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
	return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}

static struct pbase_tree {
	struct pbase_tree *next;
	/* This is a phony "cache" entry; we are not
	 * going to evict it or find it through _get()
	 * mechanism -- this is for the toplevel node that
	 * would almost always change with any commit.
	 */
	struct pbase_tree_cache pcache;
} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
{
	struct pbase_tree_cache *ent, *nent;
	void *data;
	unsigned long size;
	enum object_type type;
	int neigh;
	int my_ix = pbase_tree_cache_ix(sha1);
	int available_ix = -1;

	/* pbase-tree-cache acts as a limited hashtable.
	 * your object will be found at your index or within a few
	 * slots after that slot if it is cached.
	 */
	for (neigh = 0; neigh < 8; neigh++) {
		ent = pbase_tree_cache[my_ix];
		if (ent && !hashcmp(ent->sha1, sha1)) {
			ent->ref++;
			return ent;
		}
		else if (((available_ix < 0) && (!ent || !ent->ref)) ||
			 ((0 <= available_ix) &&
			  (!ent && pbase_tree_cache[available_ix])))
			available_ix = my_ix;
		if (!ent)
			break;
		my_ix = pbase_tree_cache_ix_incr(my_ix);
	}

	/* Did not find one.  Either we got a bogus request or
	 * we need to read and perhaps cache.
	 */
	data = read_sha1_file(sha1, &type, &size);
	if (!data)
		return NULL;
	if (type != OBJ_TREE) {
		free(data);
		return NULL;
	}

	/* We need to either cache or return a throwaway copy */

	if (available_ix < 0)
		ent = NULL;
	else {
		ent = pbase_tree_cache[available_ix];
		my_ix = available_ix;
	}

	if (!ent) {
		nent = xmalloc(sizeof(*nent));
		nent->temporary = (available_ix < 0);
	}
	else {
		/* evict and reuse */
		free(ent->tree_data);
		nent = ent;
	}
	hashcpy(nent->sha1, sha1);
	nent->tree_data = data;
	nent->tree_size = size;
	nent->ref = 1;
	if (!nent->temporary)
		pbase_tree_cache[my_ix] = nent;
	return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
	if (!cache->temporary) {
		cache->ref--;
		return;
	}
	free(cache->tree_data);
	free(cache);
}

static int name_cmp_len(const char *name)
{
	int i;
	for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
		;
	return i;
}

static void add_pbase_object(struct tree_desc *tree,
			     const char *name,
			     int cmplen,
			     const char *fullname)
{
	struct name_entry entry;
	int cmp;

	while (tree_entry(tree, &entry)) {
		if (S_ISGITLINK(entry.mode))
			continue;
		cmp = tree_entry_len(&entry) != cmplen ? 1 :
		      memcmp(name, entry.path, cmplen);
		if (cmp > 0)
			continue;
		if (cmp < 0)
			return;
		if (name[cmplen] != '/') {
			add_object_entry(entry.oid->hash,
					 object_type(entry.mode),
					 fullname, 1);
			return;
		}
		if (S_ISDIR(entry.mode)) {
			struct tree_desc sub;
			struct pbase_tree_cache *tree;
			const char *down = name+cmplen+1;
			int downlen = name_cmp_len(down);

			tree = pbase_tree_get(entry.oid->hash);
			if (!tree)
				return;
			init_tree_desc(&sub, tree->tree_data, tree->tree_size);

			add_pbase_object(&sub, down, downlen, fullname);
			pbase_tree_put(tree);
		}
	}
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;

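/*
 * Binary search in done_pbase_paths[], which is kept sorted in
 * descending hash order; returns the index of the hash if present,
 * or -insertion_position-1 if it is not.
 */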
static int done_pbase_path_pos(unsigned hash)
{
	int lo = 0;
	int hi = done_pbase_paths_num;
	while (lo < hi) {
		int mi = (hi + lo) / 2;
		if (done_pbase_paths[mi] == hash)
			return mi;
		if (done_pbase_paths[mi] < hash)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}

static int check_pbase_path(unsigned hash)
{
	int pos = done_pbase_path_pos(hash);
	if (0 <= pos)
		return 1;
	pos = -pos - 1;
	ALLOC_GROW(done_pbase_paths,
		   done_pbase_paths_num + 1,
		   done_pbase_paths_alloc);
	done_pbase_paths_num++;
	if (pos < done_pbase_paths_num)
		MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,
			   done_pbase_paths_num - pos - 1);
	done_pbase_paths[pos] = hash;
	return 0;
}

static void add_preferred_base_object(const char *name)
{
	struct pbase_tree *it;
	int cmplen;
	unsigned hash = pack_name_hash(name);

	if (!num_preferred_base || check_pbase_path(hash))
		return;

	cmplen = name_cmp_len(name);
	for (it = pbase_tree; it; it = it->next) {
		if (cmplen == 0) {
			add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
		}
		else {
			struct tree_desc tree;
			init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
			add_pbase_object(&tree, name, cmplen, name);
		}
	}
}

static void add_preferred_base(unsigned char *sha1)
{
	struct pbase_tree *it;
	void *data;
	unsigned long size;
	unsigned char tree_sha1[20];

	if (window <= num_preferred_base++)
		return;

	data = read_object_with_reference(sha1, tree_type, &size, tree_sha1);
	if (!data)
		return;

	for (it = pbase_tree; it; it = it->next) {
		if (!hashcmp(it->pcache.sha1, tree_sha1)) {
			free(data);
			return;
		}
	}

	it = xcalloc(1, sizeof(*it));
	it->next = pbase_tree;
	pbase_tree = it;

	hashcpy(it->pcache.sha1, tree_sha1);
	it->pcache.tree_data = data;
	it->pcache.tree_size = size;
}

static void cleanup_preferred_base(void)
{
	struct pbase_tree *it;
	unsigned i;

	it = pbase_tree;
	pbase_tree = NULL;
	while (it) {
		struct pbase_tree *this = it;
		it = this->next;
		free(this->pcache.tree_data);
		free(this);
	}

	for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
		if (!pbase_tree_cache[i])
			continue;
		free(pbase_tree_cache[i]->tree_data);
		FREE_AND_NULL(pbase_tree_cache[i]);
	}

	FREE_AND_NULL(done_pbase_paths);
	done_pbase_paths_num = done_pbase_paths_alloc = 0;
}

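/*
 * Decide how the object will be represented in the pack: when it
 * already lives in a pack, record its in-pack type, size and header
 * size so that its (possibly delta) data can be reused; otherwise fall
 * back to sha1_object_info() to learn its type and size.
 */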
static void check_object(struct object_entry *entry)
{
	if (entry->in_pack) {
		struct packed_git *p = entry->in_pack;
		struct pack_window *w_curs = NULL;
		const unsigned char *base_ref = NULL;
		struct object_entry *base_entry;
		unsigned long used, used_0;
		unsigned long avail;
		off_t ofs;
		unsigned char *buf, c;

		buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);

		/*
		 * We want in_pack_type even if we do not reuse delta
		 * since non-delta representations could still be reused.
		 */
		used = unpack_object_header_buffer(buf, avail,
						   &entry->in_pack_type,
						   &entry->size);
		if (used == 0)
			goto give_up;

		/*
		 * Determine if this is a delta and if so whether we can
		 * reuse it or not.  Otherwise let's find out as cheaply as
		 * possible what the actual type and size for this object is.
		 */
		switch (entry->in_pack_type) {
		default:
			/* Not a delta hence we've already got all we need. */
			entry->type = entry->in_pack_type;
			entry->in_pack_header_size = used;
			if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		case OBJ_REF_DELTA:
			if (reuse_delta && !entry->preferred_base)
				base_ref = use_pack(p, &w_curs,
						entry->in_pack_offset + used, NULL);
			entry->in_pack_header_size = used + 20;
			break;
		case OBJ_OFS_DELTA:
			buf = use_pack(p, &w_curs,
				       entry->in_pack_offset + used, NULL);
			used_0 = 0;
			c = buf[used_0++];
			ofs = c & 127;
			while (c & 128) {
				ofs += 1;
				if (!ofs || MSB(ofs, 7)) {
					error("delta base offset overflow in pack for %s",
					      oid_to_hex(&entry->idx.oid));
					goto give_up;
				}
				c = buf[used_0++];
				ofs = (ofs << 7) + (c & 127);
			}
			ofs = entry->in_pack_offset - ofs;
			if (ofs <= 0 || ofs >= entry->in_pack_offset) {
				error("delta base offset out of bound for %s",
				      oid_to_hex(&entry->idx.oid));
				goto give_up;
			}
			if (reuse_delta && !entry->preferred_base) {
				struct revindex_entry *revidx;
				revidx = find_pack_revindex(p, ofs);
				if (!revidx)
					goto give_up;
				base_ref = nth_packed_object_sha1(p, revidx->nr);
			}
			entry->in_pack_header_size = used + used_0;
			break;
		}

		if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
			/*
			 * If base_ref was set above that means we wish to
			 * reuse delta data, and we even found that base
			 * in the list of objects we want to pack. Goodie!
			 *
			 * Depth value does not matter - find_deltas() will
			 * never consider reused delta as the base object to
			 * deltify other objects against, in order to avoid
			 * circular deltas.
			 */
			entry->type = entry->in_pack_type;
			entry->delta = base_entry;
			entry->delta_size = entry->size;
			entry->delta_sibling = base_entry->delta_child;
			base_entry->delta_child = entry;
			unuse_pack(&w_curs);
			return;
		}

		if (entry->type) {
			/*
			 * This must be a delta and we already know what the
			 * final object type is.  Let's extract the actual
			 * object size from the delta header.
			 */
			entry->size = get_size_from_delta(p, &w_curs,
					entry->in_pack_offset + entry->in_pack_header_size);
			if (entry->size == 0)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		}

		/*
		 * No choice but to fall back to the recursive delta walk
		 * with sha1_object_info() to find out about the object type
		 * at this point...
		 */
		give_up:
		unuse_pack(&w_curs);
	}

	entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size);
	/*
	 * The error condition is checked in prepare_pack().  This is
	 * to permit a missing preferred base object to be ignored
	 * as a preferred base.  Doing so can result in a larger
	 * pack file, but the transfer will still take place.
	 */
}

static int pack_offset_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;

	/* avoid filesystem thrashing with loose objects */
	if (!a->in_pack && !b->in_pack)
		return oidcmp(&a->idx.oid, &b->idx.oid);

	if (a->in_pack < b->in_pack)
		return -1;
	if (a->in_pack > b->in_pack)
		return 1;
	return a->in_pack_offset < b->in_pack_offset ? -1 :
			(a->in_pack_offset > b->in_pack_offset);
}

/*
 * Drop an on-disk delta we were planning to reuse. Naively, this would
 * just involve blanking out the "delta" field, but we have to deal
 * with some extra book-keeping:
 *
 *   1. Removing ourselves from the delta_sibling linked list.
 *
 *   2. Updating our size/type to the non-delta representation. These were
 *      either not recorded initially (size) or overwritten with the delta type
 *      (type) when check_object() decided to reuse the delta.
 *
 *   3. Resetting our delta depth, as we are now a base object.
 */
static void drop_reused_delta(struct object_entry *entry)
{
	struct object_entry **p = &entry->delta->delta_child;
	struct object_info oi = OBJECT_INFO_INIT;

	while (*p) {
		if (*p == entry)
			*p = (*p)->delta_sibling;
		else
			p = &(*p)->delta_sibling;
	}
	entry->delta = NULL;
	entry->depth = 0;

	oi.sizep = &entry->size;
	oi.typep = &entry->type;
	if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
		/*
		 * We failed to get the info from this pack for some reason;
		 * fall back to sha1_object_info, which may find another copy.
		 * And if that fails, the error will be recorded in entry->type
		 * and dealt with in prepare_pack().
		 */
		entry->type = sha1_object_info(entry->idx.oid.hash,
					       &entry->size);
	}
}

/*
 * Follow the chain of deltas from this entry onward, throwing away any links
 * that cause us to hit a cycle (as determined by the DFS state flags in
 * the entries).
 *
 * We also detect too-long reused chains that would violate our --depth
 * limit.
 */
static void break_delta_chains(struct object_entry *entry)
{
	/*
	 * The actual depth of each object we will write is stored as an int,
	 * as it cannot exceed our int "depth" limit. But before we break
	 * chains based on that limit, we may potentially go as deep as the
	 * number of objects, which is elsewhere bounded to a uint32_t.
	 */
	uint32_t total_depth;
	struct object_entry *cur, *next;

	for (cur = entry, total_depth = 0;
	     cur;
	     cur = cur->delta, total_depth++) {
		if (cur->dfs_state == DFS_DONE) {
			/*
			 * We've already seen this object and know it isn't
			 * part of a cycle. We do need to append its depth
			 * to our count.
			 */
			total_depth += cur->depth;
			break;
		}

		/*
		 * We break cycles before looping, so an ACTIVE state (or any
		 * other cruft which made its way into the state variable)
		 * is a bug.
		 */
		if (cur->dfs_state != DFS_NONE)
			die("BUG: confusing delta dfs state in first pass: %d",
			    cur->dfs_state);

		/*
		 * Now we know this is the first time we've seen the object. If
		 * it's not a delta, we're done traversing, but we'll mark it
		 * done to save time on future traversals.
		 */
		if (!cur->delta) {
			cur->dfs_state = DFS_DONE;
			break;
		}

		/*
		 * Mark ourselves as active and see if the next step causes
		 * us to cycle to another active object. It's important to do
		 * this _before_ we loop, because it impacts where we make the
		 * cut, and thus how our total_depth counter works.
		 * E.g., we may see a partial loop like:
		 *
		 *   A -> B -> C -> D -> B
		 *
		 * Cutting B->C breaks the cycle. But now the depth of A is
		 * only 1, and our total_depth counter is at 3. The size of the
		 * error is always one less than the size of the cycle we
		 * broke. Commits C and D were "lost" from A's chain.
		 *
		 * If we instead cut D->B, then the depth of A is correct at 3.
		 * We keep all commits in the chain that we examined.
		 */
		cur->dfs_state = DFS_ACTIVE;
		if (cur->delta->dfs_state == DFS_ACTIVE) {
			drop_reused_delta(cur);
			cur->dfs_state = DFS_DONE;
			break;
		}
	}

	/*
	 * And now that we've gone all the way to the bottom of the chain, we
	 * need to clear the active flags and set the depth fields as
	 * appropriate. Unlike the loop above, which can quit when it drops a
	 * delta, we need to keep going to look for more depth cuts. So we need
	 * an extra "next" pointer to keep going after we reset cur->delta.
	 */
	for (cur = entry; cur; cur = next) {
		next = cur->delta;

		/*
		 * We should have a chain of zero or more ACTIVE states down to
		 * a final DONE. We can quit after the DONE, because either it
		 * has no bases, or we've already handled them in a previous
		 * call.
		 */
		if (cur->dfs_state == DFS_DONE)
			break;
		else if (cur->dfs_state != DFS_ACTIVE)
			die("BUG: confusing delta dfs state in second pass: %d",
			    cur->dfs_state);

		/*
		 * If the total_depth is more than depth, then we need to snip
		 * the chain into two or more smaller chains that don't exceed
		 * the maximum depth. Most of the resulting chains will contain
		 * (depth + 1) entries (i.e., depth deltas plus one base), and
		 * the last chain (i.e., the one containing entry) will contain
		 * whatever entries are left over, namely
		 * (total_depth % (depth + 1)) of them.
		 *
		 * Since we are iterating towards decreasing depth, we need to
		 * decrement total_depth as we go, and we need to write to the
		 * entry what its final depth will be after all of the
		 * snipping. Since we're snipping into chains of length (depth
		 * + 1) entries, the final depth of an entry will be its
		 * original depth modulo (depth + 1). Any time we encounter an
		 * entry whose final depth is supposed to be zero, we snip it
		 * from its delta base, thereby making it so.
		 */
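		/*
		 * For example, with --depth 2 (chains of at most 3 objects)
		 * a reused chain of 7 objects is snipped into sub-chains of
		 * 1, 3 and 3 objects, the short one being the chain that
		 * contains 'entry' itself.
		 */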
		cur->depth = (total_depth--) % (depth + 1);
		if (!cur->depth)
			drop_reused_delta(cur);

		cur->dfs_state = DFS_DONE;
	}
}

static void get_object_details(void)
{
	uint32_t i;
	struct object_entry **sorted_by_offset;

	sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
	for (i = 0; i < to_pack.nr_objects; i++)
		sorted_by_offset[i] = to_pack.objects + i;
	QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = sorted_by_offset[i];
		check_object(entry);
		if (big_file_threshold < entry->size)
			entry->no_try_delta = 1;
	}

	/*
	 * This must happen in a second pass, since we rely on the delta
	 * information for the whole list being completed.
	 */
	for (i = 0; i < to_pack.nr_objects; i++)
		break_delta_chains(&to_pack.objects[i]);

	free(sorted_by_offset);
}

/*
 * We search for deltas in a list sorted by type, by filename hash, and then
 * by size, so that we see progressively smaller and smaller files.
 * That's because we prefer deltas to be from the bigger file
 * to the smaller -- deletes are potentially cheaper, but perhaps
 * more importantly, the bigger file is likely the more recent
 * one.  The deepest deltas are therefore the oldest objects which are
 * less susceptible to be accessed often.
 */
static int type_size_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;

	if (a->type > b->type)
		return -1;
	if (a->type < b->type)
		return 1;
	if (a->hash > b->hash)
		return -1;
	if (a->hash < b->hash)
		return 1;
	if (a->preferred_base > b->preferred_base)
		return -1;
	if (a->preferred_base < b->preferred_base)
		return 1;
	if (a->size > b->size)
		return -1;
	if (a->size < b->size)
		return 1;
	return a < b ? -1 : (a > b);  /* newest first */
}

struct unpacked {
	struct object_entry *entry;
	void *data;
	struct delta_index *index;
	unsigned depth;
};

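/*
 * Decide whether a freshly computed delta is worth keeping in the
 * delta cache until write time. While the cache stays under
 * max_delta_cache_size, small deltas qualify outright and larger ones
 * only when they are tiny relative to their source and target (e.g. a
 * pair of 2 MiB objects justifies caching a delta of up to ~3 KiB).
 */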
static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
			   unsigned long delta_size)
{
	if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
		return 0;

	if (delta_size < cache_max_small_delta_size)
		return 1;

	/* cache delta, if objects are large enough compared to delta size */
	if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
		return 1;

	return 0;
}

#ifndef NO_PTHREADS

static pthread_mutex_t read_mutex;
#define read_lock()		pthread_mutex_lock(&read_mutex)
#define read_unlock()		pthread_mutex_unlock(&read_mutex)

static pthread_mutex_t cache_mutex;
#define cache_lock()		pthread_mutex_lock(&cache_mutex)
#define cache_unlock()		pthread_mutex_unlock(&cache_mutex)

static pthread_mutex_t progress_mutex;
#define progress_lock()		pthread_mutex_lock(&progress_mutex)
#define progress_unlock()	pthread_mutex_unlock(&progress_mutex)

#else

#define read_lock()		(void)0
#define read_unlock()		(void)0
#define cache_lock()		(void)0
#define cache_unlock()		(void)0
#define progress_lock()		(void)0
#define progress_unlock()	(void)0

#endif

static int try_delta(struct unpacked *trg, struct unpacked *src,
		     unsigned max_depth, unsigned long *mem_usage)
{
	struct object_entry *trg_entry = trg->entry;
	struct object_entry *src_entry = src->entry;
	unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
	unsigned ref_depth;
	enum object_type type;
	void *delta_buf;

	/* Don't bother doing diffs between different types */
	if (trg_entry->type != src_entry->type)
		return -1;

	/*
	 * We do not bother to try a delta that we discarded on an
	 * earlier try, but only when reusing delta data.  Note that
	 * src_entry that is marked as the preferred_base should always
	 * be considered, as even if we produce a suboptimal delta against
	 * it, we will still save the transfer cost, as we already know
	 * the other side has it and we won't send src_entry at all.
	 */
	if (reuse_delta && trg_entry->in_pack &&
	    trg_entry->in_pack == src_entry->in_pack &&
	    !src_entry->preferred_base &&
	    trg_entry->in_pack_type != OBJ_REF_DELTA &&
	    trg_entry->in_pack_type != OBJ_OFS_DELTA)
		return 0;

	/* Let's not bust the allowed depth. */
	if (src->depth >= max_depth)
		return 0;

	/* Now some size filtering heuristics. */
	trg_size = trg_entry->size;
	if (!trg_entry->delta) {
		max_size = trg_size/2 - 20;
		ref_depth = 1;
	} else {
		max_size = trg_entry->delta_size;
		ref_depth = trg->depth;
	}
	max_size = (uint64_t)max_size * (max_depth - src->depth) /
						(max_depth - ref_depth + 1);
	if (max_size == 0)
		return 0;

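	/*
	 * The scaling above makes a delta against an already-deep source
	 * acceptable only if it is proportionally smaller: e.g. with the
	 * default --depth of 50, a source sitting at depth 49 may only
	 * yield a delta about 1/50th of the size that a depth-0 source
	 * would be allowed to produce.
	 */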
	src_size = src_entry->size;
	sizediff = src_size < trg_size ? trg_size - src_size : 0;
	if (sizediff >= max_size)
		return 0;
	if (trg_size < src_size / 32)
		return 0;

	/* Load data if not already done */
	if (!trg->data) {
		read_lock();
		trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type,
					   &sz);
		read_unlock();
		if (!trg->data)
			die("object %s cannot be read",
			    oid_to_hex(&trg_entry->idx.oid));
		if (sz != trg_size)
			die("object %s inconsistent object length (%lu vs %lu)",
			    oid_to_hex(&trg_entry->idx.oid), sz,
			    trg_size);
		*mem_usage += sz;
	}
	if (!src->data) {
		read_lock();
		src->data = read_sha1_file(src_entry->idx.oid.hash, &type,
					   &sz);
		read_unlock();
		if (!src->data) {
			if (src_entry->preferred_base) {
				static int warned = 0;
				if (!warned++)
					warning("object %s cannot be read",
						oid_to_hex(&src_entry->idx.oid));
				/*
				 * Those objects are not included in the
				 * resulting pack.  Be resilient and ignore
				 * them if they can't be read, in case the
				 * pack could be created nevertheless.
				 */
				return 0;
			}
			die("object %s cannot be read",
			    oid_to_hex(&src_entry->idx.oid));
		}
		if (sz != src_size)
			die("object %s inconsistent object length (%lu vs %lu)",
			    oid_to_hex(&src_entry->idx.oid), sz,
			    src_size);
		*mem_usage += sz;
	}
	if (!src->index) {
		src->index = create_delta_index(src->data, src_size);
		if (!src->index) {
			static int warned = 0;
			if (!warned++)
				warning("suboptimal pack - out of memory");
			return 0;
		}
		*mem_usage += sizeof_delta_index(src->index);
	}

	delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
	if (!delta_buf)
		return 0;

	if (trg_entry->delta) {
		/* Prefer only shallower same-sized deltas. */
		if (delta_size == trg_entry->delta_size &&
		    src->depth + 1 >= trg->depth) {
			free(delta_buf);
			return 0;
		}
	}

	/*
	 * Handle memory allocation outside of the cache
	 * accounting lock.  Compiler will optimize the strangeness
	 * away when NO_PTHREADS is defined.
	 */
	free(trg_entry->delta_data);
	cache_lock();
	if (trg_entry->delta_data) {
		delta_cache_size -= trg_entry->delta_size;
		trg_entry->delta_data = NULL;
	}
	if (delta_cacheable(src_size, trg_size, delta_size)) {
		delta_cache_size += delta_size;
		cache_unlock();
		trg_entry->delta_data = xrealloc(delta_buf, delta_size);
	} else {
		cache_unlock();
		free(delta_buf);
	}

	trg_entry->delta = src_entry;
	trg_entry->delta_size = delta_size;
	trg->depth = src->depth + 1;

	return 1;
}

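/*
 * Return the deepest delta depth found in the subtree rooted at 'me',
 * given that 'me' itself sits at depth 'n'; used to leave headroom for
 * dependent objects when deltifying an object at the pack edge.
 */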
static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
	struct object_entry *child = me->delta_child;
	unsigned int m = n;
	while (child) {
		unsigned int c = check_delta_limit(child, n + 1);
		if (m < c)
			m = c;
		child = child->delta_sibling;
	}
	return m;
}

static unsigned long free_unpacked(struct unpacked *n)
{
	unsigned long freed_mem = sizeof_delta_index(n->index);
	free_delta_index(n->index);
	n->index = NULL;
	if (n->data) {
		freed_mem += n->entry->size;
		FREE_AND_NULL(n->data);
	}
	n->entry = NULL;
	n->depth = 0;
	return freed_mem;
}

static void find_deltas(struct object_entry **list, unsigned *list_size,
			int window, int depth, unsigned *processed)
{
	uint32_t i, idx = 0, count = 0;
	struct unpacked *array;
	unsigned long mem_usage = 0;

	array = xcalloc(window, sizeof(struct unpacked));

	for (;;) {
		struct object_entry *entry;
		struct unpacked *n = array + idx;
		int j, max_depth, best_base = -1;

		progress_lock();
		if (!*list_size) {
			progress_unlock();
			break;
		}
		entry = *list++;
		(*list_size)--;
		if (!entry->preferred_base) {
			(*processed)++;
			display_progress(progress_state, *processed);
		}
		progress_unlock();

		mem_usage -= free_unpacked(n);
		n->entry = entry;

		while (window_memory_limit &&
		       mem_usage > window_memory_limit &&
		       count > 1) {
			uint32_t tail = (idx + window - count) % window;
			mem_usage -= free_unpacked(array + tail);
			count--;
		}

		/* We do not compute delta to *create* objects we are not
		 * going to pack.
		 */
		if (entry->preferred_base)
			goto next;

		/*
		 * If the current object is at pack edge, take the depth the
		 * objects that depend on the current object into account
		 * otherwise they would become too deep.
		 */
		max_depth = depth;
		if (entry->delta_child) {
			max_depth -= check_delta_limit(entry, 0);
			if (max_depth <= 0)
				goto next;
		}

		j = window;
		while (--j > 0) {
			int ret;
			uint32_t other_idx = idx + j;
			struct unpacked *m;
			if (other_idx >= window)
				other_idx -= window;
			m = array + other_idx;
			if (!m->entry)
				break;
			ret = try_delta(n, m, max_depth, &mem_usage);
			if (ret < 0)
				break;
			else if (ret > 0)
				best_base = other_idx;
		}

		/*
		 * If we decided to cache the delta data, then it is best
		 * to compress it right away.  First because we have to do
		 * it anyway, and doing it here while we're threaded will
		 * save a lot of time in the non threaded write phase,
		 * as well as allow for caching more deltas within
		 * the same cache size limit.
		 * ...
		 * But only if not writing to stdout, since in that case
		 * the network is most likely throttling writes anyway,
		 * and therefore it is best to go to the write phase ASAP
		 * instead, as we can afford spending more time compressing
		 * between writes at that moment.
		 */
		if (entry->delta_data && !pack_to_stdout) {
			entry->z_delta_size = do_compress(&entry->delta_data,
							  entry->delta_size);
			cache_lock();
			delta_cache_size -= entry->delta_size;
			delta_cache_size += entry->z_delta_size;
			cache_unlock();
		}

		/* if we made n a delta, and if n is already at max
		 * depth, leaving it in the window is pointless.  we
		 * should evict it first.
		 */
		if (entry->delta && max_depth <= n->depth)
			continue;

		/*
		 * Move the best delta base up in the window, after the
		 * currently deltified object, to keep it longer.  It will
		 * be the first base object to be attempted next.
		 */
		if (entry->delta) {
			struct unpacked swap = array[best_base];
			int dist = (window + idx - best_base) % window;
			int dst = best_base;
			while (dist--) {
				int src = (dst + 1) % window;
				array[dst] = array[src];
				dst = src;
			}
			array[dst] = swap;
		}

		next:
		idx++;
		if (count + 1 < window)
			count++;
		if (idx >= window)
			idx = 0;
	}

	for (i = 0; i < window; ++i) {
		free_delta_index(array[i].index);
		free(array[i].data);
	}
	free(array);
}

#ifndef NO_PTHREADS

static void try_to_free_from_threads(size_t size)
{
	read_lock();
	release_pack_memory(size);
	read_unlock();
}

static try_to_free_t old_try_to_free_routine;

/*
 * The main thread waits on the condition that (at least) one of the workers
 * has stopped working (which is indicated in the .working member of
 * struct thread_params).
 * When a work thread has completed its work, it sets .working to 0 and
 * signals the main thread and waits on the condition that .data_ready
 * becomes 1.
 */

struct thread_params {
	pthread_t thread;
	struct object_entry **list;
	unsigned list_size;
	unsigned remaining;
	int window;
	int depth;
	int working;
	int data_ready;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	unsigned *processed;
};

static pthread_cond_t progress_cond;

/*
 * Mutex and conditional variable can't be statically-initialized on Windows.
 */
static void init_threaded_search(void)
{
	init_recursive_mutex(&read_mutex);
	pthread_mutex_init(&cache_mutex, NULL);
	pthread_mutex_init(&progress_mutex, NULL);
	pthread_cond_init(&progress_cond, NULL);
	old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
}

static void cleanup_threaded_search(void)
{
	set_try_to_free_routine(old_try_to_free_routine);
	pthread_cond_destroy(&progress_cond);
	pthread_mutex_destroy(&read_mutex);
	pthread_mutex_destroy(&cache_mutex);
	pthread_mutex_destroy(&progress_mutex);
}

static void *threaded_find_deltas(void *arg)
{
	struct thread_params *me = arg;

	progress_lock();
	while (me->remaining) {
		progress_unlock();

		find_deltas(me->list, &me->remaining,
			    me->window, me->depth, me->processed);

		progress_lock();
		me->working = 0;
		pthread_cond_signal(&progress_cond);
		progress_unlock();

		/*
		 * We must not set ->data_ready before we wait on the
		 * condition because the main thread may have set it to 1
		 * before we get here. In order to be sure that new
		 * work is available if we see 1 in ->data_ready, it
		 * was initialized to 0 before this thread was spawned
		 * and we reset it to 0 right away.
		 */
		pthread_mutex_lock(&me->mutex);
		while (!me->data_ready)
			pthread_cond_wait(&me->cond, &me->mutex);
		me->data_ready = 0;
		pthread_mutex_unlock(&me->mutex);

		progress_lock();
	}
	progress_unlock();
	/* leave ->working 1 so that this doesn't get more work assigned */
	return NULL;
}

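/*
 * Drive the threaded delta search: the object list is partitioned into
 * roughly equal segments (split on name-hash boundaries), one worker
 * thread per segment; whenever a worker runs dry, half of the largest
 * remaining segment is stolen and handed to it until everything is
 * processed.
 */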
static void *threaded_find_deltas(void *arg)
{
	struct thread_params *me = arg;

	progress_lock();
	while (me->remaining) {
		progress_unlock();

		find_deltas(me->list, &me->remaining,
			    me->window, me->depth, me->processed);

		progress_lock();
		me->working = 0;
		pthread_cond_signal(&progress_cond);
		progress_unlock();

		/*
		 * We must not set ->data_ready before we wait on the
		 * condition because the main thread may have set it to 1
		 * before we get here.  In order to be sure that new
		 * work is available if we see 1 in ->data_ready, it
		 * was initialized to 0 before this thread was spawned
		 * and we reset it to 0 right away.
		 */
		pthread_mutex_lock(&me->mutex);
		while (!me->data_ready)
			pthread_cond_wait(&me->cond, &me->mutex);
		me->data_ready = 0;
		pthread_mutex_unlock(&me->mutex);

		progress_lock();
	}
	progress_unlock();
	/* leave ->working 1 so that this doesn't get more work assigned */
	return NULL;
}

static void ll_find_deltas(struct object_entry **list, unsigned list_size,
			   int window, int depth, unsigned *processed)
{
	struct thread_params *p;
	int i, ret, active_threads = 0;

	init_threaded_search();

	if (delta_search_threads <= 1) {
		find_deltas(list, &list_size, window, depth, processed);
		cleanup_threaded_search();
		return;
	}
	if (progress > pack_to_stdout)
		fprintf(stderr, "Delta compression using up to %d threads.\n",
			delta_search_threads);
	p = xcalloc(delta_search_threads, sizeof(*p));

	/* Partition the work amongst work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		unsigned sub_size = list_size / (delta_search_threads - i);

		/* don't use too small segments or no deltas will be found */
		if (sub_size < 2 * window && i + 1 < delta_search_threads)
			sub_size = 0;

		p[i].window = window;
		p[i].depth = depth;
		p[i].processed = processed;
		p[i].working = 1;
		p[i].data_ready = 0;

		/* try to split chunks on "path" boundaries */
		while (sub_size && sub_size < list_size &&
		       list[sub_size]->hash &&
		       list[sub_size]->hash == list[sub_size - 1]->hash)
			sub_size++;

		p[i].list = list;
		p[i].list_size = sub_size;
		p[i].remaining = sub_size;

		list += sub_size;
		list_size -= sub_size;
	}
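	/*
	 * E.g. with list_size = 100 and 4 threads, the loop above hands out
	 * 100/4 = 25, then 75/3 = 25, 50/2 = 25 and 25/1 = 25 objects, each
	 * boundary then being nudged forward so that entries sharing the
	 * same name hash (roughly: the same path) stay in one segment.
	 */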
	/* Start work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		if (!p[i].list_size)
			continue;
		pthread_mutex_init(&p[i].mutex, NULL);
		pthread_cond_init(&p[i].cond, NULL);
		ret = pthread_create(&p[i].thread, NULL,
				     threaded_find_deltas, &p[i]);
		if (ret)
			die("unable to create thread: %s", strerror(ret));
		active_threads++;
	}

	/*
	 * Now let's wait for work completion.  Each time a thread is done
	 * with its work, we steal half of the remaining work from the
	 * thread with the largest number of unprocessed objects and give
	 * it to that newly idle thread.  This ensures good load balancing
	 * until the remaining object list segments are simply too short
	 * to be worth splitting anymore.
	 */
	while (active_threads) {
		struct thread_params *target = NULL;
		struct thread_params *victim = NULL;
		unsigned sub_size = 0;

		progress_lock();
		for (;;) {
			for (i = 0; !target && i < delta_search_threads; i++)
				if (!p[i].working)
					target = &p[i];
			if (target)
				break;
			pthread_cond_wait(&progress_cond, &progress_mutex);
		}

		for (i = 0; i < delta_search_threads; i++)
			if (p[i].remaining > 2 * window &&
			    (!victim || victim->remaining < p[i].remaining))
				victim = &p[i];
		if (victim) {
			sub_size = victim->remaining / 2;
			list = victim->list + victim->list_size - sub_size;
			while (sub_size && list[0]->hash &&
			       list[0]->hash == list[-1]->hash) {
				list++;
				sub_size--;
			}
			if (!sub_size) {
				/*
				 * It is possible for some "paths" to have
				 * so many objects that no hash boundary
				 * might be found.  Let's just steal the
				 * exact half in that case.
				 */
				sub_size = victim->remaining / 2;
				list -= sub_size;
			}
			target->list = list;
			victim->list_size -= sub_size;
			victim->remaining -= sub_size;
		}
		target->list_size = sub_size;
		target->remaining = sub_size;
		target->working = 1;
		progress_unlock();

		pthread_mutex_lock(&target->mutex);
		target->data_ready = 1;
		pthread_cond_signal(&target->cond);
		pthread_mutex_unlock(&target->mutex);

		if (!sub_size) {
			pthread_join(target->thread, NULL);
			pthread_cond_destroy(&target->cond);
			pthread_mutex_destroy(&target->mutex);
			active_threads--;
		}
	}
	cleanup_threaded_search();
	free(p);
}

#else
#define ll_find_deltas(l, s, w, d, p)	find_deltas(l, &s, w, d, p)
#endif

static void add_tag_chain(const struct object_id *oid)
{
	struct tag *tag;

	/*
	 * We catch duplicates already in add_object_entry(), but we'd
	 * prefer to do this extra check to avoid having to parse the
	 * tag at all if we already know that it's being packed (e.g., if
	 * it was included via bitmaps, we would not have parsed it
	 * previously).
	 */
	if (packlist_find(&to_pack, oid->hash, NULL))
		return;

	tag = lookup_tag(oid);
	while (1) {
		if (!tag || parse_tag(tag) || !tag->tagged)
			die("unable to pack objects reachable from tag %s",
			    oid_to_hex(oid));

		add_object_entry(tag->object.oid.hash, OBJ_TAG, NULL, 0);

		if (tag->tagged->type != OBJ_TAG)
			return;

		tag = (struct tag *)tag->tagged;
	}
}

static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
{
	struct object_id peeled;

	if (starts_with(path, "refs/tags/") &&	/* is a tag? */
	    !peel_ref(path, peeled.hash)    &&	/* peelable? */
	    packlist_find(&to_pack, peeled.hash, NULL))	/* object packed? */
		add_tag_chain(oid);
	return 0;
}
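/*
 * Example (hypothetical refs): for a nested tag "v1.0" -> (tag) "signed"
 * -> (commit) C, peel_ref() in add_ref_tag() resolves straight to C; if C
 * made it into the pack, add_tag_chain() then walks and adds both tag
 * objects, so the whole chain travels with the commit it annotates.
 */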
static void prepare_pack(int window, int depth)
{
	struct object_entry **delta_list;
	uint32_t i, nr_deltas;
	unsigned n;

	get_object_details();

	/*
	 * If we're locally repacking then we need to be doubly careful
	 * from now on in order to make sure no stealth corruption gets
	 * propagated to the new pack.  Clients receiving streamed packs
	 * should validate everything they get anyway so no need to incur
	 * the additional cost here in that case.
	 */
	if (!pack_to_stdout)
		do_check_packed_object_crc = 1;

	if (!to_pack.nr_objects || !window || !depth)
		return;

	ALLOC_ARRAY(delta_list, to_pack.nr_objects);
	nr_deltas = n = 0;

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = to_pack.objects + i;

		if (entry->delta)
			/*
			 * This happens if we decided to reuse an existing
			 * delta from a pack.  "reuse_delta &&" is implied.
			 */
			continue;

		if (entry->size < 50)
			continue;

		if (entry->no_try_delta)
			continue;

		if (!entry->preferred_base) {
			nr_deltas++;
			if (entry->type < 0)
				die("unable to get type of object %s",
				    oid_to_hex(&entry->idx.oid));
		} else {
			if (entry->type < 0) {
				/*
				 * This object is not found, but we
				 * don't have to include it anyway.
				 */
				continue;
			}
		}

		delta_list[n++] = entry;
	}

	if (nr_deltas && n > 1) {
		unsigned nr_done = 0;
		if (progress)
			progress_state = start_progress(_("Compressing objects"),
							nr_deltas);
		QSORT(delta_list, n, type_size_sort);
		ll_find_deltas(delta_list, n, window + 1, depth, &nr_done);
		stop_progress(&progress_state);
		if (nr_done != nr_deltas)
			die("inconsistency with delta count");
	}
	free(delta_list);
}

static int git_pack_config(const char *k, const char *v, void *cb)
{
	if (!strcmp(k, "pack.window")) {
		window = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.windowmemory")) {
		window_memory_limit = git_config_ulong(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.depth")) {
		depth = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachesize")) {
		max_delta_cache_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachelimit")) {
		cache_max_small_delta_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.writebitmaphashcache")) {
		if (git_config_bool(k, v))
			write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
		else
			write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
	}
	if (!strcmp(k, "pack.usebitmaps")) {
		use_bitmap_index_default = git_config_bool(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.threads")) {
		delta_search_threads = git_config_int(k, v);
		if (delta_search_threads < 0)
			die("invalid number of threads specified (%d)",
			    delta_search_threads);
#ifdef NO_PTHREADS
		if (delta_search_threads != 1) {
			warning("no threads support, ignoring %s", k);
			delta_search_threads = 0;
		}
#endif
		return 0;
	}
	if (!strcmp(k, "pack.indexversion")) {
		pack_idx_opts.version = git_config_int(k, v);
		if (pack_idx_opts.version > 2)
			die("bad pack.indexversion=%"PRIu32,
			    pack_idx_opts.version);
		return 0;
	}
	return git_default_config(k, v, cb);
}
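/*
 * Illustrative configuration consumed by git_pack_config() above (the
 * values here are made up, not defaults):
 *
 *     [pack]
 *         window = 20
 *         depth = 50
 *         deltaCacheSize = 512m
 *         threads = 0        ; 0 means "autodetect", see cmd_pack_objects()
 */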
static void read_object_list_from_stdin(void)
{
	char line[40 + 1 + PATH_MAX + 2];
	unsigned char sha1[20];

	for (;;) {
		if (!fgets(line, sizeof(line), stdin)) {
			if (feof(stdin))
				break;
			if (!ferror(stdin))
				die("fgets returned NULL, not EOF, not error!");
			if (errno != EINTR)
				die_errno("fgets");
			clearerr(stdin);
			continue;
		}
		if (line[0] == '-') {
			if (get_sha1_hex(line + 1, sha1))
				die("expected edge sha1, got garbage:\n %s",
				    line);
			add_preferred_base(sha1);
			continue;
		}
		if (get_sha1_hex(line, sha1))
			die("expected sha1, got garbage:\n %s", line);

		add_preferred_base_object(line + 41);
		add_object_entry(sha1, 0, line + 41, 0);
	}
}

#define OBJECT_ADDED (1u<<20)

static void show_commit(struct commit *commit, void *data)
{
	add_object_entry(commit->object.oid.hash, OBJ_COMMIT, NULL, 0);
	commit->object.flags |= OBJECT_ADDED;

	if (write_bitmap_index)
		index_commit_for_bitmap(commit);
}

static void show_object(struct object *obj, const char *name, void *data)
{
	add_preferred_base_object(name);
	add_object_entry(obj->oid.hash, obj->type, name, 0);
	obj->flags |= OBJECT_ADDED;
}

static void show_edge(struct commit *commit)
{
	add_preferred_base(commit->object.oid.hash);
}

struct in_pack_object {
	off_t offset;
	struct object *object;
};

struct in_pack {
	int alloc;
	int nr;
	struct in_pack_object *array;
};

static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
{
	in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
	in_pack->array[in_pack->nr].object = object;
	in_pack->nr++;
}

/*
 * Compare the objects in the offset order, in order to emulate the
 * "git rev-list --objects" output that produced the pack originally.
 */
static int ofscmp(const void *a_, const void *b_)
{
	struct in_pack_object *a = (struct in_pack_object *)a_;
	struct in_pack_object *b = (struct in_pack_object *)b_;

	if (a->offset < b->offset)
		return -1;
	else if (a->offset > b->offset)
		return 1;
	else
		return oidcmp(&a->object->oid, &b->object->oid);
}

static void add_objects_in_unpacked_packs(struct rev_info *revs)
{
	struct packed_git *p;
	struct in_pack in_pack;
	uint32_t i;

	memset(&in_pack, 0, sizeof(in_pack));

	for (p = packed_git; p; p = p->next) {
		const unsigned char *sha1;
		struct object *o;

		if (!p->pack_local || p->pack_keep)
			continue;
		if (open_pack_index(p))
			die("cannot open pack index");

		ALLOC_GROW(in_pack.array,
			   in_pack.nr + p->num_objects,
			   in_pack.alloc);

		for (i = 0; i < p->num_objects; i++) {
			sha1 = nth_packed_object_sha1(p, i);
			o = lookup_unknown_object(sha1);
			if (!(o->flags & OBJECT_ADDED))
				mark_in_pack_object(o, p, &in_pack);
			o->flags |= OBJECT_ADDED;
		}
	}

	if (in_pack.nr) {
		QSORT(in_pack.array, in_pack.nr, ofscmp);
		for (i = 0; i < in_pack.nr; i++) {
			struct object *o = in_pack.array[i].object;
			add_object_entry(o->oid.hash, o->type, "", 0);
		}
	}
	free(in_pack.array);
}

static int add_loose_object(const struct object_id *oid, const char *path,
			    void *data)
{
	enum object_type type = sha1_object_info(oid->hash, NULL);

	if (type < 0) {
		warning("loose object at %s could not be examined", path);
		return 0;
	}

	add_object_entry(oid->hash, type, "", 0);
	return 0;
}

/*
 * We actually don't even have to worry about reachability here.
 * add_object_entry will weed out duplicates, so we just add every
 * loose object we find.
 */
static void add_unreachable_loose_objects(void)
{
	for_each_loose_file_in_objdir(get_object_directory(),
				      add_loose_object,
				      NULL, NULL, NULL);
}
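/*
 * has_sha1_pack_kept_or_nonlocal() below keeps a one-entry cache,
 * "last_found", of the pack that matched most recently, since consecutive
 * lookups tend to hit the same pack.  The (void *)1 sentinel means "no
 * cached pack yet"; the loop starts from the cached pack, then scans the
 * rest of packed_git, skipping the cached one when it comes around again.
 */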
static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
{
	static struct packed_git *last_found = (void *)1;
	struct packed_git *p;

	p = (last_found != (void *)1) ? last_found : packed_git;

	while (p) {
		if ((!p->pack_local || p->pack_keep) &&
		    find_pack_entry_one(sha1, p)) {
			last_found = p;
			return 1;
		}
		if (p == last_found)
			p = packed_git;
		else
			p = p->next;
		if (p == last_found)
			p = p->next;
	}
	return 0;
}

/*
 * Store a list of sha1s that should not be discarded, because they are
 * either written too recently, or are reachable from another object
 * that was.
 *
 * This is filled by get_object_list.
 */
static struct oid_array recent_objects;

static int loosened_object_can_be_discarded(const struct object_id *oid,
					    timestamp_t mtime)
{
	if (!unpack_unreachable_expiration)
		return 0;
	if (mtime > unpack_unreachable_expiration)
		return 0;
	if (oid_array_lookup(&recent_objects, oid) >= 0)
		return 0;
	return 1;
}

static void loosen_unused_packed_objects(struct rev_info *revs)
{
	struct packed_git *p;
	uint32_t i;
	struct object_id oid;

	for (p = packed_git; p; p = p->next) {
		if (!p->pack_local || p->pack_keep)
			continue;

		if (open_pack_index(p))
			die("cannot open pack index");

		for (i = 0; i < p->num_objects; i++) {
			nth_packed_object_oid(&oid, p, i);
			if (!packlist_find(&to_pack, oid.hash, NULL) &&
			    !has_sha1_pack_kept_or_nonlocal(oid.hash) &&
			    !loosened_object_can_be_discarded(&oid, p->mtime))
				if (force_object_loose(oid.hash, p->mtime))
					die("unable to force loose object");
		}
	}
}

/*
 * This tracks any options which pack-reuse code expects to be on, or which a
 * reader of the pack might not understand, and which would therefore prevent
 * blind reuse of what we have on disk.
 */
static int pack_options_allow_reuse(void)
{
	return pack_to_stdout &&
	       allow_ofs_delta &&
	       !ignore_packed_keep &&
	       (!local || !have_non_local_packs) &&
	       !incremental;
}

static int get_object_list_from_bitmap(struct rev_info *revs)
{
	if (prepare_bitmap_walk(revs) < 0)
		return -1;

	if (pack_options_allow_reuse() &&
	    !reuse_partial_packfile_from_bitmap(
			&reuse_packfile,
			&reuse_packfile_objects,
			&reuse_packfile_offset)) {
		assert(reuse_packfile_objects);
		nr_result += reuse_packfile_objects;
		display_progress(progress_state, nr_result);
	}

	traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
	return 0;
}

static void record_recent_object(struct object *obj,
				 const char *name,
				 void *data)
{
	oid_array_append(&recent_objects, &obj->oid);
}

static void record_recent_commit(struct commit *commit, void *data)
{
	oid_array_append(&recent_objects, &commit->object.oid);
}
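/*
 * get_object_list() below reads the revision arguments for the internal
 * rev-list walk from stdin, one per line, e.g. (illustrative values):
 *
 *     HEAD
 *     --not
 *     v2.14.0
 *
 * An empty line ends the list.  "--not" flips the UNINTERESTING flag for
 * the lines that follow and turns off bitmap writing; "--shallow <sha1>"
 * registers a shallow boundary and disables bitmap use altogether.
 */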
static void get_object_list(int ac, const char **av)
{
	struct rev_info revs;
	char line[1000];
	int flags = 0;

	init_revisions(&revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	/* make sure shallows are read */
	is_repository_shallow();

	while (fgets(line, sizeof(line), stdin) != NULL) {
		int len = strlen(line);
		if (len && line[len - 1] == '\n')
			line[--len] = 0;
		if (!len)
			break;
		if (*line == '-') {
			if (!strcmp(line, "--not")) {
				flags ^= UNINTERESTING;
				write_bitmap_index = 0;
				continue;
			}
			if (starts_with(line, "--shallow ")) {
				struct object_id oid;
				if (get_oid_hex(line + 10, &oid))
					die("not an SHA-1 '%s'", line + 10);
				register_shallow(&oid);
				use_bitmap_index = 0;
				continue;
			}
			die("not a rev '%s'", line);
		}
		if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
			die("bad revision '%s'", line);
	}

	if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
		return;

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	mark_edges_uninteresting(&revs, show_edge);
	traverse_commit_list(&revs, show_commit, show_object, NULL);

	if (unpack_unreachable_expiration) {
		revs.ignore_missing_links = 1;
		if (add_unseen_recent_objects_to_traversal(&revs,
				unpack_unreachable_expiration))
			die("unable to add recent objects");
		if (prepare_revision_walk(&revs))
			die("revision walk setup failed");
		traverse_commit_list(&revs, record_recent_commit,
				     record_recent_object, NULL);
	}

	if (keep_unreachable)
		add_objects_in_unpacked_packs(&revs);
	if (pack_loose_unreachable)
		add_unreachable_loose_objects();
	if (unpack_unreachable)
		loosen_unused_packed_objects(&revs);

	oid_array_clear(&recent_objects);
}

static int option_parse_index_version(const struct option *opt,
				      const char *arg, int unset)
{
	char *c;
	const char *val = arg;
	pack_idx_opts.version = strtoul(val, &c, 10);
	if (pack_idx_opts.version > 2)
		die(_("unsupported index version %s"), val);
	if (*c == ',' && c[1])
		pack_idx_opts.off32_limit = strtoul(c + 1, &c, 0);
	if (*c || pack_idx_opts.off32_limit & 0x80000000)
		die(_("bad index version '%s'"), val);
	return 0;
}

static int option_parse_unpack_unreachable(const struct option *opt,
					   const char *arg, int unset)
{
	if (unset) {
		unpack_unreachable = 0;
		unpack_unreachable_expiration = 0;
	} else {
		unpack_unreachable = 1;
		if (arg)
			unpack_unreachable_expiration = approxidate(arg);
	}
	return 0;
}
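/*
 * option_parse_index_version() above accepts "--index-version=<n>" with
 * an optional ",<off32-limit>" suffix, e.g. "--index-version=2,0x40000000"
 * (an illustrative value): version 2 .idx files, with the given boundary
 * controlling which offsets are pushed into the 64-bit offset table.
 */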
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
	int use_internal_rev_list = 0;
	int thin = 0;
	int shallow = 0;
	int all_progress_implied = 0;
	struct argv_array rp = ARGV_ARRAY_INIT;
	int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
	int rev_list_index = 0;
	struct option pack_objects_options[] = {
		OPT_SET_INT('q', "quiet", &progress,
			    N_("do not show progress meter"), 0),
		OPT_SET_INT(0, "progress", &progress,
			    N_("show progress meter"), 1),
		OPT_SET_INT(0, "all-progress", &progress,
			    N_("show progress meter during object writing phase"), 2),
		OPT_BOOL(0, "all-progress-implied",
			 &all_progress_implied,
			 N_("similar to --all-progress when progress meter is shown")),
		{ OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
		  N_("write the pack index file in the specified idx format version"),
		  0, option_parse_index_version },
		OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
			      N_("maximum size of each output pack file")),
		OPT_BOOL(0, "local", &local,
			 N_("ignore borrowed objects from alternate object store")),
		OPT_BOOL(0, "incremental", &incremental,
			 N_("ignore packed objects")),
		OPT_INTEGER(0, "window", &window,
			    N_("limit pack window by objects")),
		OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
			      N_("limit pack window by memory in addition to object limit")),
		OPT_INTEGER(0, "depth", &depth,
			    N_("maximum length of delta chain allowed in the resulting pack")),
		OPT_BOOL(0, "reuse-delta", &reuse_delta,
			 N_("reuse existing deltas")),
		OPT_BOOL(0, "reuse-object", &reuse_object,
			 N_("reuse existing objects")),
		OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
			 N_("use OFS_DELTA objects")),
		OPT_INTEGER(0, "threads", &delta_search_threads,
			    N_("use threads when searching for best delta matches")),
		OPT_BOOL(0, "non-empty", &non_empty,
			 N_("do not create an empty pack output")),
		OPT_BOOL(0, "revs", &use_internal_rev_list,
			 N_("read revision arguments from standard input")),
		{ OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
		  N_("limit the objects to those that are not yet packed"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
		  N_("include objects reachable from any reference"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
		  N_("include objects referred by reflog entries"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
		  N_("include objects referred to by the index"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		OPT_BOOL(0, "stdout", &pack_to_stdout,
			 N_("output pack to stdout")),
		OPT_BOOL(0, "include-tag", &include_tag,
			 N_("include tag objects that refer to objects to be packed")),
		OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
			 N_("keep unreachable objects")),
		OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
			 N_("pack loose unreachable objects")),
		{ OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
		  N_("unpack unreachable objects newer than <time>"),
		  PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
		OPT_BOOL(0, "thin", &thin,
			 N_("create thin packs")),
		OPT_BOOL(0, "shallow", &shallow,
			 N_("create packs suitable for shallow fetches")),
		OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
			 N_("ignore packs that have companion .keep file")),
		OPT_INTEGER(0, "compression", &pack_compression_level,
			    N_("pack compression level")),
		OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
			    N_("do not hide commits by grafts"), 0),
		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
			 N_("use a bitmap index if available to speed up counting objects")),
		OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
			 N_("write a bitmap index together with the pack index")),
		OPT_END(),
	};

	check_replace_refs = 0;

	reset_pack_idx_option(&pack_idx_opts);
	git_config(git_pack_config, NULL);

	progress = isatty(2);
	argc = parse_options(argc, argv, prefix, pack_objects_options,
			     pack_usage, 0);

	if (argc) {
		base_name = argv[0];
		argc--;
	}
	if (pack_to_stdout != !base_name || argc)
		usage_with_options(pack_usage, pack_objects_options);
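	/*
	 * The check above enforces that exactly one output destination was
	 * chosen: either --stdout with no <base-name>, or a <base-name>
	 * with no --stdout, and no further arguments in either case.
	 */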
	argv_array_push(&rp, "pack-objects");
	if (thin) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, shallow
				? "--objects-edge-aggressive"
				: "--objects-edge");
	} else
		argv_array_push(&rp, "--objects");

	if (rev_list_all) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--all");
	}
	if (rev_list_reflog) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--reflog");
	}
	if (rev_list_index) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--indexed-objects");
	}
	if (rev_list_unpacked) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--unpacked");
	}

	if (!reuse_object)
		reuse_delta = 0;
	if (pack_compression_level == -1)
		pack_compression_level = Z_DEFAULT_COMPRESSION;
	else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
		die("bad pack compression level %d", pack_compression_level);

	if (!delta_search_threads)	/* --threads=0 means autodetect */
		delta_search_threads = online_cpus();

#ifdef NO_PTHREADS
	if (delta_search_threads != 1)
		warning("no threads support, ignoring --threads");
#endif
	if (!pack_to_stdout && !pack_size_limit)
		pack_size_limit = pack_size_limit_cfg;
	if (pack_to_stdout && pack_size_limit)
		die("--max-pack-size cannot be used to build a pack for transfer.");
	if (pack_size_limit && pack_size_limit < 1024 * 1024) {
		warning("minimum pack size limit is 1 MiB");
		pack_size_limit = 1024 * 1024;
	}

	if (!pack_to_stdout && thin)
		die("--thin cannot be used to build an indexable pack.");

	if (keep_unreachable && unpack_unreachable)
		die("--keep-unreachable and --unpack-unreachable are incompatible.");
	if (!rev_list_all || !rev_list_reflog || !rev_list_index)
		unpack_unreachable_expiration = 0;

	/*
	 * "Soft" reasons not to use bitmaps - for on-disk repack by default we want
	 *
	 * - to produce a good pack (with a bitmap index, not-yet-packed objects
	 *   are packed in a suboptimal order).
	 *
	 * - to use the more robust pack-generation codepath (avoiding possible
	 *   bugs in the bitmap code and possible bitmap index corruption).
	 */
	if (!pack_to_stdout)
		use_bitmap_index_default = 0;

	if (use_bitmap_index < 0)
		use_bitmap_index = use_bitmap_index_default;

	/* "hard" reasons not to use bitmaps; these just won't work at all */
	if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
		use_bitmap_index = 0;

	if (pack_to_stdout || !rev_list_all)
		write_bitmap_index = 0;

	if (progress && all_progress_implied)
		progress = 2;

	prepare_packed_git();
	if (ignore_packed_keep) {
		struct packed_git *p;
		for (p = packed_git; p; p = p->next)
			if (p->pack_local && p->pack_keep)
				break;
		if (!p) /* no keep-able packs found */
			ignore_packed_keep = 0;
	}
	if (local) {
		/*
		 * unlike ignore_packed_keep above, we do not want to
		 * unset "local" based on looking at packs, as it
		 * also covers non-local objects
		 */
		struct packed_git *p;
		for (p = packed_git; p; p = p->next) {
			if (!p->pack_local) {
				have_non_local_packs = 1;
				break;
			}
		}
	}

	if (progress)
		progress_state = start_progress(_("Counting objects"), 0);
	if (!use_internal_rev_list)
		read_object_list_from_stdin();
	else {
		get_object_list(rp.argc, rp.argv);
		argv_array_clear(&rp);
	}
	cleanup_preferred_base();
	if (include_tag && nr_result)
		for_each_ref(add_ref_tag, NULL);
	stop_progress(&progress_state);

	if (non_empty && !nr_result)
		return 0;
	if (nr_result)
		prepare_pack(window, depth);
	write_pack_file();
	if (progress)
		fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
			" reused %"PRIu32" (delta %"PRIu32")\n",
			written, written_delta, reused, reused_delta);
	return 0;
}