#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};

#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif

struct packed_ref_store;

struct packed_ref_cache {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * cache is associated:
	 */
	struct packed_ref_store *refs;

	struct ref_cache *cache;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file. If the file was
	 * already sorted, this points at the mmapped contents of the
	 * file. If not, this points at heap-allocated memory
	 * containing the contents, sorted. If there were no contents
	 * (e.g., because the file didn't exist), `buf` and `eof` are
	 * both NULL.
	 */
	char *buf, *eof;

	/* The size of the header line, if any; otherwise, 0: */
	size_t header_len;

	/*
	 * What is the peeled state of this cache? (This is usually
	 * determined from the header of the "packed-refs" file.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};

/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	struct ref_store base;

	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};
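/*
 * For orientation, a `packed-refs` file as handled by this backend
 * looks something like the following (hypothetical object names):
 *
 *     # pack-refs with: peeled fully-peeled sorted
 *     1111111111111111111111111111111111111111 refs/heads/master
 *     2222222222222222222222222222222222222222 refs/tags/v1.0
 *     ^3333333333333333333333333333333333333333
 *
 * That is: an optional header line, then one line per reference
 * ("<object name> SP <refname> LF"), where a tag reference may be
 * followed by a peeled line ("^<object name> LF") naming the object
 * that the tag ultimately refers to.
 */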
/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	packed_refs->referrers++;
}

/*
 * If the buffer in `packed_refs` is active, then either munmap the
 * memory and close the file, or free the memory. Then set the buffer
 * pointers to NULL.
 */
static void release_packed_ref_buffer(struct packed_ref_cache *packed_refs)
{
	if (packed_refs->mmapped) {
		if (munmap(packed_refs->buf,
			   packed_refs->eof - packed_refs->buf))
			die_errno("error unmapping packed-refs file %s",
				  packed_refs->refs->path);
		packed_refs->mmapped = 0;
	} else {
		free(packed_refs->buf);
	}
	packed_refs->buf = packed_refs->eof = NULL;
	packed_refs->header_len = 0;
}

/*
 * Decrease the reference count of *packed_refs. If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	if (!--packed_refs->referrers) {
		free_ref_cache(packed_refs->cache);
		stat_validity_clear(&packed_refs->validity);
		release_packed_ref_buffer(packed_refs);
		free(packed_refs);
		return 1;
	} else {
		return 0;
	}
}

struct ref_store *packed_ref_store_create(const char *path,
					  unsigned int store_flags)
{
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;

	base_ref_store_init(ref_store, &refs_be_packed);
	refs->store_flags = store_flags;

	refs->path = xstrdup(path);
	return ref_store;
}

/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
						unsigned int required_flags,
						const char *caller)
{
	struct packed_ref_store *refs;

	if (ref_store->be != &refs_be_packed)
		die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
		    ref_store->be->name, caller);

	refs = (struct packed_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		die("BUG: unallowed operation (%s), requires %x, has %x\n",
		    caller, required_flags, refs->store_flags);

	return refs;
}

static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache) {
		struct packed_ref_cache *cache = refs->cache;

		refs->cache = NULL;
		release_packed_ref_cache(cache);
	}
}

static NORETURN void die_unterminated_line(const char *path,
					   const char *p, size_t len)
{
	if (len < 80)
		die("unterminated line in %s: %.*s", path, (int)len, p);
	else
		die("unterminated line in %s: %.75s...", path, p);
}

static NORETURN void die_invalid_line(const char *path,
				      const char *p, size_t len)
{
	const char *eol = memchr(p, '\n', len);

	if (!eol)
		die_unterminated_line(path, p, len);
	else if (eol - p < 80)
		die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
	else
		die("unexpected line in %s: %.75s...", path, p);
}
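/*
 * A sketch of the record grammar that the iterator below expects;
 * this is an informal restatement of the checks performed in
 * mmapped_ref_iterator_advance():
 *
 *     record      := ref-line [peeled-line]
 *     ref-line    := <GIT_SHA1_HEXSZ hex digits> SP <refname> LF
 *     peeled-line := "^" <GIT_SHA1_HEXSZ hex digits> LF
 */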
/*
 * An iterator over a packed-refs file that is currently mmapped.
 */
struct mmapped_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *packed_refs;

	/* The current position in the mmapped file: */
	const char *pos;

	/* The end of the mmapped file: */
	const char *eof;

	struct object_id oid, peeled;

	struct strbuf refname_buf;
};

static int mmapped_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ref_iterator_abort(ref_iterator);

	iter->base.flags = REF_ISPACKED;

	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->packed_refs->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->packed_refs->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->packed_refs->peeled == PEELED_FULLY ||
	    (iter->packed_refs->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->packed_refs->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}
static int mmapped_ref_iterator_peel(struct ref_iterator *ref_iterator,
				     struct object_id *peeled)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;

	if ((iter->base.flags & REF_KNOWS_PEELED)) {
		oidcpy(peeled, &iter->peeled);
		return is_null_oid(&iter->peeled) ? -1 : 0;
	} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
		return -1;
	} else {
		return !!peel_object(iter->oid.hash, peeled->hash);
	}
}

static int mmapped_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;

	release_packed_ref_cache(iter->packed_refs);
	strbuf_release(&iter->refname_buf);
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}

static struct ref_iterator_vtable mmapped_ref_iterator_vtable = {
	mmapped_ref_iterator_advance,
	mmapped_ref_iterator_peel,
	mmapped_ref_iterator_abort
};

struct ref_iterator *mmapped_ref_iterator_begin(
		struct packed_ref_cache *packed_refs,
		const char *pos, const char *eof)
{
	struct mmapped_ref_iterator *iter;
	struct ref_iterator *ref_iterator;

	/* Check this before allocating, so the empty case doesn't leak: */
	if (!packed_refs->buf)
		return empty_ref_iterator_begin();

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;

	base_ref_iterator_init(ref_iterator, &mmapped_ref_iterator_vtable, 1);

	iter->packed_refs = packed_refs;
	acquire_packed_ref_cache(iter->packed_refs);
	iter->pos = pos;
	iter->eof = eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	return ref_iterator;
}

struct packed_ref_entry {
	const char *start;
	size_t len;
};

static int cmp_packed_ref_entries(const void *v1, const void *v2)
{
	const struct packed_ref_entry *e1 = v1, *e2 = v2;
	const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
	const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;

	while (1) {
		if (*r1 == '\n')
			return *r2 == '\n' ? 0 : -1;
		if (*r1 != *r2) {
			if (*r2 == '\n')
				return 1;
			else
				return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
		}
		r1++;
		r2++;
	}
}

/*
 * Compare a packed-refs record pointed to by `rec` to the specified
 * NUL-terminated refname.
 */
static int cmp_entry_to_refname(const char *rec, const char *refname)
{
	const char *r1 = rec + GIT_SHA1_HEXSZ + 1;
	const char *r2 = refname;

	while (1) {
		if (*r1 == '\n')
			return *r2 ? -1 : 0;
		if (!*r2)
			return 1;
		if (*r1 != *r2)
			return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
		r1++;
		r2++;
	}
}
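/*
 * A worked example for the two comparison helpers above (refnames
 * chosen arbitrarily): comparing the records for "refs/heads/master"
 * and "refs/heads/master-2" walks both refnames until they diverge;
 * the first record's next byte is '\n' (end of refname) while the
 * second's is '-', so "refs/heads/master" sorts first. Because the
 * terminator ('\n' in a record, NUL in a C string) compares below
 * every byte that can legally appear in a refname, this ordering
 * agrees with strcmp() on the bare refnames.
 */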
/*
 * `packed_refs->buf` is not known to be sorted. Check whether it is,
 * and if not, sort it into new memory and munmap/free the old
 * storage.
 */
static void sort_packed_refs(struct packed_ref_cache *packed_refs)
{
	struct packed_ref_entry *entries = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	pos = packed_refs->buf + packed_refs->header_len;
	eof = packed_refs->eof;
	len = eof - pos;

	if (!len)
		return;

	/*
	 * Initialize entries based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(entries, len / 80 + 20, alloc);

	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < GIT_SHA1_HEXSZ + 2)
			die_invalid_line(packed_refs->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(entries, nr + 1, alloc);
		entries[nr].start = pos;
		entries[nr].len = eol - pos;
		nr++;

		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_entries(&entries[nr - 2],
					   &entries[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the entries array: */
	QSORT(entries, nr, cmp_packed_ref_entries);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `entries` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, entries[i].start, entries[i].len);
		dst += entries[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	release_packed_ref_buffer(packed_refs);
	packed_refs->buf = new_buffer;
	packed_refs->eof = new_buffer + len;
	packed_refs->header_len = 0;

cleanup:
	free(entries);
}

/*
 * Return a pointer to the start of the record that contains the
 * character `*p` (which must be within the buffer). If no other
 * record start is found, return `buf`.
 */
static const char *find_start_of_record(const char *buf, const char *p)
{
	while (p > buf && (p[-1] != '\n' || p[0] == '^'))
		p--;
	return p;
}

/*
 * Return a pointer to the start of the record following the record
 * that contains `*p`. If none is found before `end`, return `end`.
 */
static const char *find_end_of_record(const char *p, const char *end)
{
	while (++p < end && (p[-1] != '\n' || p[0] == '^'))
		;
	return p;
}

/*
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 *
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation). But we do want to be
 * sure that we never read past the end of the buffer in memory and
 * perform an illegal memory access.
 *
 * Guarantee that minimum level of safety by verifying that the last
 * record in the file is LF-terminated, and that it has at least
 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 * these checks fails.
 */
static void verify_buffer_safe(struct packed_ref_cache *packed_refs)
{
	const char *buf = packed_refs->buf + packed_refs->header_len;
	const char *eof = packed_refs->eof;
	const char *last_line;

	if (buf == eof)
		return;

	last_line = find_start_of_record(buf, eof - 1);
	if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
		die_invalid_line(packed_refs->refs->path,
				 last_line, eof - last_line);
}
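/*
 * To illustrate the record-boundary helpers above on a small
 * (hypothetical) buffer, where a tag's entry is followed by its
 * peeled line:
 *
 *     <oid> refs/tags/v1.0 LF   <- record R1 starts here
 *     ^<oid> LF                 <- still part of R1 ('^' line)
 *     <oid> refs/tags/v2.0 LF   <- record R2 starts here
 *
 * Given a pointer anywhere inside the '^' line, find_start_of_record()
 * backs up to the start of R1, because a line beginning with '^' never
 * starts a record; find_end_of_record() from the same pointer skips
 * forward to the start of R2.
 */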
/*
 * Depending on `mmap_strategy`, either mmap or read the contents of
 * the `packed-refs` file into the `packed_refs` instance. Return 1 if
 * the file existed and was read, or 0 if the file was absent. Die on
 * errors.
 */
static int load_contents(struct packed_ref_cache *packed_refs)
{
	int fd;
	struct stat st;
	size_t size;
	ssize_t bytes_read;

	fd = open(packed_refs->refs->path, O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty,
			 * which is its state when initialized with
			 * zeros.
			 */
			return 0;
		} else {
			die_errno("couldn't read %s", packed_refs->refs->path);
		}
	}

	stat_validity_update(&packed_refs->validity, fd);

	if (fstat(fd, &st) < 0)
		die_errno("couldn't stat %s", packed_refs->refs->path);
	size = xsize_t(st.st_size);

	switch (mmap_strategy) {
	case MMAP_NONE:
		packed_refs->buf = xmalloc(size);
		bytes_read = read_in_full(fd, packed_refs->buf, size);
		if (bytes_read < 0 || bytes_read != size)
			die_errno("couldn't read %s", packed_refs->refs->path);
		packed_refs->eof = packed_refs->buf + size;
		packed_refs->mmapped = 0;
		break;
	case MMAP_TEMPORARY:
	case MMAP_OK:
		packed_refs->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
		packed_refs->eof = packed_refs->buf + size;
		packed_refs->mmapped = 1;
		break;
	}
	close(fd);

	return 1;
}

/*
 * Find the place in `cache->buf` where the record for `refname`
 * starts. If `mustexist` is true and the reference doesn't exist,
 * then return NULL. If `mustexist` is false and the reference doesn't
 * exist, then return the point where that reference would be
 * inserted. In the latter mode, `refname` doesn't have to be a proper
 * reference name; for example, one could search for "refs/replace/"
 * to find the start of any replace references.
 *
 * The record is sought using a binary search, so `cache->buf` must be
 * sorted.
 */
static const char *find_reference_location(struct packed_ref_cache *cache,
					   const char *refname, int mustexist)
{
	/*
	 * This is not *quite* a garden-variety binary search, because
	 * the data we're searching is made up of records, and we
	 * always need to find the beginning of a record to do a
	 * comparison. A "record" here is one line for the reference
	 * itself and zero or one peel lines that start with '^'. Our
	 * loop invariant is described in the next two comments.
	 */

	/*
	 * A pointer to the character at the start of a record whose
	 * preceding records all have reference names that come
	 * *before* `refname`.
	 */
	const char *lo = cache->buf + cache->header_len;

	/*
	 * A pointer to the first character of a record whose
	 * reference name comes *after* `refname`.
	 */
	const char *hi = cache->eof;

	while (lo < hi) {
		const char *mid, *rec;
		int cmp;

		mid = lo + (hi - lo) / 2;
		rec = find_start_of_record(lo, mid);
		cmp = cmp_entry_to_refname(rec, refname);
		if (cmp < 0) {
			lo = find_end_of_record(mid, hi);
		} else if (cmp > 0) {
			hi = rec;
		} else {
			return rec;
		}
	}

	if (mustexist)
		return NULL;
	else
		return lo;
}
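/*
 * Usage sketch: packed_ref_iterator_begin() (below) uses the
 * non-mustexist mode to start iterating at a prefix, e.g.
 *
 *     start = find_reference_location(packed_refs, "refs/tags/", 0);
 *
 * which returns the first record whose refname sorts at or after
 * "refs/tags/", i.e. the start of the (possibly empty) range of tag
 * references.
 */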
/*
 * Read from the `packed-refs` file into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * Neither `peeled` nor `fully-peeled`:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 * `peeled`:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we
 *      find a peeled value for such a reference we will use it.
 *
 * `fully-peeled`:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded are not peelable.
 *      This trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 *
 * `sorted`:
 *
 *      The references in this file are known to be sorted by refname.
 */
static struct packed_ref_cache *read_packed_refs(struct packed_ref_store *refs)
{
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	struct ref_dir *dir;
	struct ref_iterator *iter;
	int sorted = 0;
	int ok;

	packed_refs->refs = refs;
	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;
	packed_refs->peeled = PEELED_NONE;

	if (!load_contents(packed_refs))
		return packed_refs;

	/* If the file has a header line, process it: */
	if (packed_refs->buf < packed_refs->eof && *packed_refs->buf == '#') {
		struct strbuf tmp = STRBUF_INIT;
		char *p;
		const char *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(packed_refs->buf, '\n',
			     packed_refs->eof - packed_refs->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      packed_refs->buf,
					      packed_refs->eof - packed_refs->buf);

		strbuf_add(&tmp, packed_refs->buf, eol - packed_refs->buf);

		if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 packed_refs->buf,
					 packed_refs->eof - packed_refs->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			packed_refs->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			packed_refs->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		packed_refs->header_len = eol + 1 - packed_refs->buf;

		string_list_clear(&traits, 0);
		strbuf_release(&tmp);
	}

	verify_buffer_safe(packed_refs);

	if (!sorted) {
		sort_packed_refs(packed_refs);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(packed_refs);
	}

	if (mmap_strategy != MMAP_OK && packed_refs->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = packed_refs->eof -
			(packed_refs->buf + packed_refs->header_len);
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, packed_refs->buf + packed_refs->header_len, size);
		release_packed_ref_buffer(packed_refs);
		packed_refs->buf = buf_copy;
		packed_refs->eof = buf_copy + size;
	}

	dir = get_ref_dir(packed_refs->cache->root);
	iter = mmapped_ref_iterator_begin(
			packed_refs,
			packed_refs->buf + packed_refs->header_len,
			packed_refs->eof);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		struct ref_entry *entry =
			create_ref_entry(iter->refname, iter->oid, iter->flags);

		if ((iter->flags & REF_KNOWS_PEELED))
			ref_iterator_peel(iter, &entry->u.value.peeled);
		add_ref_entry(dir, entry);
	}

	if (ok != ITER_DONE)
		die("error reading packed-refs file %s", refs->path);

	return packed_refs;
}

/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache &&
	    !stat_validity_check(&refs->cache->validity, refs->path))
		clear_packed_ref_cache(refs);
}

/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_packed_ref_cache(refs);

	if (!refs->cache)
		refs->cache = read_packed_refs(refs);

	return refs->cache;
}

static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
	return get_ref_dir(packed_ref_cache->cache->root);
}

static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}
/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}

static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");

	struct ref_entry *entry;

	*type = 0;

	entry = get_packed_ref(refs, refname);
	if (!entry) {
		errno = ENOENT;
		return -1;
	}

	hashcpy(sha1, entry->u.value.oid.hash);
	*type = REF_ISPACKED;
	return 0;
}

static int packed_peel_ref(struct ref_store *ref_store,
			   const char *refname, unsigned char *sha1)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
				"peel_ref");
	struct ref_entry *r = get_packed_ref(refs, refname);

	if (!r || peel_entry(r, 0))
		return -1;

	hashcpy(sha1, r->u.value.peeled.hash);
	return 0;
}

struct packed_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *cache;
	struct ref_iterator *iter0;
	unsigned int flags;
};

static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	return ref_iterator_peel(iter->iter0, peeled);
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);

	release_packed_ref_cache(iter->cache);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_cache *packed_refs;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */
	iter->cache = packed_refs = get_packed_ref_cache(refs);
	acquire_packed_ref_cache(packed_refs);

	if (prefix && *prefix)
		start = find_reference_location(packed_refs, prefix, 0);
	else
		start = packed_refs->buf + packed_refs->header_len;

	iter->iter0 = mmapped_ref_iterator_begin(
			packed_refs, start, packed_refs->eof);

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the cache:
	 */
	get_packed_ref_cache(refs);
	return 0;
}

void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}
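/*
 * A sketch of how a caller is expected to drive the locking API above
 * together with the transaction code below (error handling omitted):
 *
 *     packed_refs_lock(ref_store, 0, &err);
 *     ... queue updates in a ref_transaction, then prepare and
 *     finish it, which rewrites the file via a tempfile and renames
 *     it into place ...
 *     packed_refs_unlock(ref_store);
 *
 * The lockfile only serializes writers; the new contents are written
 * to a separate tempfile and renamed over "packed-refs" on commit.
 */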
/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line. For this reason, the space after
 * the colon and the trailing space are required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled sorted \n";

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}
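/*
 * To make the merge performed by write_with_updates() below concrete,
 * suppose (hypothetically) the existing file lists refs A, B, and C,
 * and the sorted update list deletes B and creates D. The loop emits
 * A (pass-through), consumes B against its deletion without emitting
 * anything, emits C (pass-through), and finally emits D once the
 * iterator over the old contents is exhausted.
 */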
/*
 * Write the packed-refs from the cache to the packed-refs tempfile,
 * incorporating any changes from `updates`. `updates` must be a
 * sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, rollback the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packed-refs file must be locked before calling this function
 * and will remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(&refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		int cmp;

		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (oidcmp(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid->hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			 */
			i++;
		} else {
			struct object_id peeled;
			int peel_error = peel_object(update->new_oid.hash,
						     peeled.hash);

			if (write_packed_entry(out, update->refname,
					       update->new_oid.hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			i++;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to write packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	if (close_tempfile(&refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(&refs->tempfile),
			    strerror(errno));
		strbuf_release(&sb);
		return -1;
	}

	return 0;

write_error:
	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(&refs->tempfile), strerror(errno));

error:
	if (iter)
		ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
	return -1;
}
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	struct string_list updates;
};

static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		if (is_tempfile_active(&refs->tempfile))
			delete_tempfile(&refs->tempfile);

		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}

static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);
	return 0;
}

static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	clear_packed_ref_cache(refs);

	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
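/*
 * Putting the pieces above together, a caller-side sketch of the
 * transaction lifecycle for this backend (error paths omitted):
 *
 *     transaction = ref_store_transaction_begin(ref_store, &err);
 *     ref_transaction_update(transaction, ...);   // queue updates
 *     ref_transaction_prepare(transaction, &err); // lock + write tempfile
 *     ref_transaction_commit(transaction, &err);  // rename into place
 *
 * prepare may take the packed-refs lock itself (setting own_lock), or
 * the caller may already hold it via packed_refs_lock().
 */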
static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}

static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			      struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
			     const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}

static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
				struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}
struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_peel_ref,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};