refs/packed-backend.c — at commit "ref_store: implement `refs_peel_ref()` generically" (ba1c052)
   1#include "../cache.h"
   2#include "../config.h"
   3#include "../refs.h"
   4#include "refs-internal.h"
   5#include "ref-cache.h"
   6#include "packed-backend.h"
   7#include "../iterator.h"
   8#include "../lockfile.h"
   9
/*
 * The possible policies for reading the `packed-refs` file, chosen at
 * compile time based on platform capabilities (see the #if chain
 * below that selects one).
 */
enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};
  30
/* The mmap policy in effect for this build (see `enum mmap_strategy`). */
#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif
  38
  39struct packed_ref_store;
  40
/*
 * An in-memory cache of the contents of one `packed-refs` file.
 * Instances are reference-counted (see `referrers`); manage lifetimes
 * via acquire_packed_ref_cache()/release_packed_ref_cache().
 */
struct packed_ref_cache {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * cache is associated:
	 */
	struct packed_ref_store *refs;

	/* Parsed entries, built by read_packed_refs(). */
	struct ref_cache *cache;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file. If the file was
	 * already sorted, this points at the mmapped contents of the
	 * file. If not, this points at heap-allocated memory
	 * containing the contents, sorted. If there were no contents
	 * (e.g., because the file didn't exist), `buf` and `eof` are
	 * both NULL.
	 */
	char *buf, *eof;

	/* The size of the header line, if any; otherwise, 0: */
	size_t header_len;

	/*
	 * What is the peeled state of this cache? (This is usually
	 * determined from the header of the "packed-refs" file.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};
  83
/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	/* Embedded base; must come first so downcasts work. */
	struct ref_store base;

	/* REF_STORE_* capability flags this store supports. */
	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};
 115
 116/*
 117 * Increment the reference count of *packed_refs.
 118 */
 119static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
 120{
 121        packed_refs->referrers++;
 122}
 123
 124/*
 125 * If the buffer in `packed_refs` is active, then either munmap the
 126 * memory and close the file, or free the memory. Then set the buffer
 127 * pointers to NULL.
 128 */
 129static void release_packed_ref_buffer(struct packed_ref_cache *packed_refs)
 130{
 131        if (packed_refs->mmapped) {
 132                if (munmap(packed_refs->buf,
 133                           packed_refs->eof - packed_refs->buf))
 134                        die_errno("error ummapping packed-refs file %s",
 135                                  packed_refs->refs->path);
 136                packed_refs->mmapped = 0;
 137        } else {
 138                free(packed_refs->buf);
 139        }
 140        packed_refs->buf = packed_refs->eof = NULL;
 141        packed_refs->header_len = 0;
 142}
 143
 144/*
 145 * Decrease the reference count of *packed_refs.  If it goes to zero,
 146 * free *packed_refs and return true; otherwise return false.
 147 */
 148static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
 149{
 150        if (!--packed_refs->referrers) {
 151                free_ref_cache(packed_refs->cache);
 152                stat_validity_clear(&packed_refs->validity);
 153                release_packed_ref_buffer(packed_refs);
 154                free(packed_refs);
 155                return 1;
 156        } else {
 157                return 0;
 158        }
 159}
 160
 161struct ref_store *packed_ref_store_create(const char *path,
 162                                          unsigned int store_flags)
 163{
 164        struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
 165        struct ref_store *ref_store = (struct ref_store *)refs;
 166
 167        base_ref_store_init(ref_store, &refs_be_packed);
 168        refs->store_flags = store_flags;
 169
 170        refs->path = xstrdup(path);
 171        return ref_store;
 172}
 173
 174/*
 175 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 176 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 177 * support at least the flags specified in `required_flags`. `caller`
 178 * is used in any necessary error messages.
 179 */
 180static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
 181                                                unsigned int required_flags,
 182                                                const char *caller)
 183{
 184        struct packed_ref_store *refs;
 185
 186        if (ref_store->be != &refs_be_packed)
 187                die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
 188                    ref_store->be->name, caller);
 189
 190        refs = (struct packed_ref_store *)ref_store;
 191
 192        if ((refs->store_flags & required_flags) != required_flags)
 193                die("BUG: unallowed operation (%s), requires %x, has %x\n",
 194                    caller, required_flags, refs->store_flags);
 195
 196        return refs;
 197}
 198
 199static void clear_packed_ref_cache(struct packed_ref_store *refs)
 200{
 201        if (refs->cache) {
 202                struct packed_ref_cache *cache = refs->cache;
 203
 204                refs->cache = NULL;
 205                release_packed_ref_cache(cache);
 206        }
 207}
 208
 209static NORETURN void die_unterminated_line(const char *path,
 210                                           const char *p, size_t len)
 211{
 212        if (len < 80)
 213                die("unterminated line in %s: %.*s", path, (int)len, p);
 214        else
 215                die("unterminated line in %s: %.75s...", path, p);
 216}
 217
 218static NORETURN void die_invalid_line(const char *path,
 219                                      const char *p, size_t len)
 220{
 221        const char *eol = memchr(p, '\n', len);
 222
 223        if (!eol)
 224                die_unterminated_line(path, p, len);
 225        else if (eol - p < 80)
 226                die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
 227        else
 228                die("unexpected line in %s: %.75s...", path, p);
 229
 230}
 231
/*
 * An iterator over a packed-refs file that is currently mmapped.
 */
struct mmapped_ref_iterator {
	/* Embedded generic iterator; must come first for downcasts. */
	struct ref_iterator base;

	/* The cache whose buffer we iterate; we hold a reference on it. */
	struct packed_ref_cache *packed_refs;

	/* The current position in the mmapped file: */
	const char *pos;

	/* The end of the mmapped file: */
	const char *eof;

	/* Object ID of the current record, and its peeled value (if any). */
	struct object_id oid, peeled;

	/* Scratch buffer holding the current record's refname. */
	struct strbuf refname_buf;
};
 250
/*
 * Advance the iterator to the next record in the buffer. Parses one
 * "<hex-oid> <refname>\n" line, plus an optional following
 * "^<hex-oid>\n" peeled line. Returns ITER_OK, or the result of
 * aborting when the end of the buffer is reached. Dies on malformed
 * input.
 */
static int mmapped_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ref_iterator_abort(ref_iterator);

	iter->base.flags = REF_ISPACKED;

	/* Parse "<hex-oid> " (the bound guarantees room for OID + SP + LF): */
	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->packed_refs->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->packed_refs->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	/* Tolerate (but flag) bad refnames, as long as they are "safe": */
	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	/* The file header may guarantee that this ref's peel state is known: */
	if (iter->packed_refs->peeled == PEELED_FULLY ||
	    (iter->packed_refs->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* An optional "^<hex-oid>\n" line records the peeled value: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->packed_refs->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}
 318
 319static int mmapped_ref_iterator_peel(struct ref_iterator *ref_iterator,
 320                                    struct object_id *peeled)
 321{
 322        struct mmapped_ref_iterator *iter =
 323                (struct mmapped_ref_iterator *)ref_iterator;
 324
 325        if ((iter->base.flags & REF_KNOWS_PEELED)) {
 326                oidcpy(peeled, &iter->peeled);
 327                return is_null_oid(&iter->peeled) ? -1 : 0;
 328        } else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
 329                return -1;
 330        } else {
 331                return !!peel_object(iter->oid.hash, peeled->hash);
 332        }
 333}
 334
 335static int mmapped_ref_iterator_abort(struct ref_iterator *ref_iterator)
 336{
 337        struct mmapped_ref_iterator *iter =
 338                (struct mmapped_ref_iterator *)ref_iterator;
 339
 340        release_packed_ref_cache(iter->packed_refs);
 341        strbuf_release(&iter->refname_buf);
 342        base_ref_iterator_free(ref_iterator);
 343        return ITER_DONE;
 344}
 345
/* Virtual method table for `mmapped_ref_iterator` (advance, peel, abort). */
static struct ref_iterator_vtable mmapped_ref_iterator_vtable = {
	mmapped_ref_iterator_advance,
	mmapped_ref_iterator_peel,
	mmapped_ref_iterator_abort
};
 351
 352struct ref_iterator *mmapped_ref_iterator_begin(
 353                struct packed_ref_cache *packed_refs,
 354                const char *pos, const char *eof)
 355{
 356        struct mmapped_ref_iterator *iter = xcalloc(1, sizeof(*iter));
 357        struct ref_iterator *ref_iterator = &iter->base;
 358
 359        if (!packed_refs->buf)
 360                return empty_ref_iterator_begin();
 361
 362        base_ref_iterator_init(ref_iterator, &mmapped_ref_iterator_vtable, 1);
 363
 364        iter->packed_refs = packed_refs;
 365        acquire_packed_ref_cache(iter->packed_refs);
 366        iter->pos = pos;
 367        iter->eof = eof;
 368        strbuf_init(&iter->refname_buf, 0);
 369
 370        iter->base.oid = &iter->oid;
 371
 372        return ref_iterator;
 373}
 374
/*
 * One record in a `packed-refs` buffer: the reference line plus any
 * immediately-following peeled ("^") line. Used while sorting.
 * `start` points into the buffer; `len` includes the trailing LF(s).
 */
struct packed_ref_entry {
	const char *start;
	size_t len;
};
 379
 380static int cmp_packed_ref_entries(const void *v1, const void *v2)
 381{
 382        const struct packed_ref_entry *e1 = v1, *e2 = v2;
 383        const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
 384        const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;
 385
 386        while (1) {
 387                if (*r1 == '\n')
 388                        return *r2 == '\n' ? 0 : -1;
 389                if (*r1 != *r2) {
 390                        if (*r2 == '\n')
 391                                return 1;
 392                        else
 393                                return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
 394                }
 395                r1++;
 396                r2++;
 397        }
 398}
 399
 400/*
 401 * Compare a packed-refs record pointed to by `rec` to the specified
 402 * NUL-terminated refname.
 403 */
 404static int cmp_entry_to_refname(const char *rec, const char *refname)
 405{
 406        const char *r1 = rec + GIT_SHA1_HEXSZ + 1;
 407        const char *r2 = refname;
 408
 409        while (1) {
 410                if (*r1 == '\n')
 411                        return *r2 ? -1 : 0;
 412                if (!*r2)
 413                        return 1;
 414                if (*r1 != *r2)
 415                        return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
 416                r1++;
 417                r2++;
 418        }
 419}
 420
/*
 * `packed_refs->buf` is not known to be sorted. Check whether it is,
 * and if not, sort it into new memory and munmap/free the old
 * storage.
 */
static void sort_packed_refs(struct packed_ref_cache *packed_refs)
{
	struct packed_ref_entry *entries = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	/* Scan only the records, skipping any header line: */
	pos = packed_refs->buf + packed_refs->header_len;
	eof = packed_refs->eof;
	len = eof - pos;

	if (!len)
		return;

	/*
	 * Initialize entries based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(entries, len / 80 + 20, alloc);

	/* Split the buffer into records, checking sortedness as we go: */
	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < GIT_SHA1_HEXSZ + 2)
			die_invalid_line(packed_refs->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(entries, nr + 1, alloc);
		entries[nr].start = pos;
		entries[nr].len = eol - pos;
		nr++;

		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_entries(&entries[nr - 2],
					   &entries[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	/* Already in order (the common case): nothing to rewrite. */
	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the entries array: */
	QSORT(entries, nr, cmp_packed_ref_entries);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `entries` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, entries[i].start, entries[i].len);
		dst += entries[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	release_packed_ref_buffer(packed_refs);
	packed_refs->buf = new_buffer;
	packed_refs->eof = new_buffer + len;
	packed_refs->header_len = 0;

cleanup:
	free(entries);
}
 514
 515/*
 516 * Return a pointer to the start of the record that contains the
 517 * character `*p` (which must be within the buffer). If no other
 518 * record start is found, return `buf`.
 519 */
 520static const char *find_start_of_record(const char *buf, const char *p)
 521{
 522        while (p > buf && (p[-1] != '\n' || p[0] == '^'))
 523                p--;
 524        return p;
 525}
 526
 527/*
 528 * Return a pointer to the start of the record following the record
 529 * that contains `*p`. If none is found before `end`, return `end`.
 530 */
 531static const char *find_end_of_record(const char *p, const char *end)
 532{
 533        while (++p < end && (p[-1] != '\n' || p[0] == '^'))
 534                ;
 535        return p;
 536}
 537
/*
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 *
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation). But we do want to be
 * sure that we never read past the end of the buffer in memory and
 * perform an illegal memory access.
 *
 * Guarantee that minimum level of safety by verifying that the last
 * record in the file is LF-terminated, and that it has at least
 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 * these checks fails.
 */
static void verify_buffer_safe(struct packed_ref_cache *packed_refs)
{
	const char *buf = packed_refs->buf + packed_refs->header_len;
	const char *eof = packed_refs->eof;
	const char *last_line;

	/* An empty body (header only, or no contents at all) is safe: */
	if (buf == eof)
		return;

	/* Only the *last* record needs checking to bound all scans: */
	last_line = find_start_of_record(buf, eof - 1);
	if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
		die_invalid_line(packed_refs->refs->path,
				 last_line, eof - last_line);
}
 570
/*
 * Depending on `mmap_strategy`, either mmap or read the contents of
 * the `packed-refs` file into the `packed_refs` instance. Return 1 if
 * the file existed and was read, or 0 if the file was absent. Die on
 * errors.
 */
static int load_contents(struct packed_ref_cache *packed_refs)
{
	int fd;
	struct stat st;
	size_t size;
	ssize_t bytes_read;

	fd = open(packed_refs->refs->path, O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty,
			 * which is its state when initialized with
			 * zeros.
			 */
			return 0;
		} else {
			die_errno("couldn't read %s", packed_refs->refs->path);
		}
	}

	/* Record the file's metadata so staleness can be detected later: */
	stat_validity_update(&packed_refs->validity, fd);

	if (fstat(fd, &st) < 0)
		die_errno("couldn't stat %s", packed_refs->refs->path);
	size = xsize_t(st.st_size);

	switch (mmap_strategy) {
	case MMAP_NONE:
		packed_refs->buf = xmalloc(size);
		bytes_read = read_in_full(fd, packed_refs->buf, size);
		if (bytes_read < 0 || bytes_read != size)
			die_errno("couldn't read %s", packed_refs->refs->path);
		packed_refs->eof = packed_refs->buf + size;
		packed_refs->mmapped = 0;
		break;
	case MMAP_TEMPORARY:
	case MMAP_OK:
		/*
		 * NOTE(review): if the file exists but is empty, this
		 * calls xmmap() with size 0 — confirm xmmap tolerates
		 * zero-length mappings on all supported platforms.
		 */
		packed_refs->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
		packed_refs->eof = packed_refs->buf + size;
		packed_refs->mmapped = 1;
		break;
	}
	close(fd);

	return 1;
}
 626
/*
 * Find the place in `cache->buf` where the start of the record for
 * `refname` starts. If `mustexist` is true and the reference doesn't
 * exist, then return NULL. If `mustexist` is false and the reference
 * doesn't exist, then return the point where that reference would be
 * inserted. In the latter mode, `refname` doesn't have to be a proper
 * reference name; for example, one could search for "refs/replace/"
 * to find the start of any replace references.
 *
 * The record is sought using a binary search, so `cache->buf` must be
 * sorted.
 */
static const char *find_reference_location(struct packed_ref_cache *cache,
					   const char *refname, int mustexist)
{
	/*
	 * This is not *quite* a garden-variety binary search, because
	 * the data we're searching is made up of records, and we
	 * always need to find the beginning of a record to do a
	 * comparison. A "record" here is one line for the reference
	 * itself and zero or one peel lines that start with '^'. Our
	 * loop invariant is described in the next two comments.
	 */

	/*
	 * A pointer to the character at the start of a record whose
	 * preceding records all have reference names that come
	 * *before* `refname`.
	 */
	const char *lo = cache->buf + cache->header_len;

	/*
	 * A pointer to a the first character of a record whose
	 * reference name comes *after* `refname`.
	 */
	const char *hi = cache->eof;

	while (lo < hi) {
		const char *mid, *rec;
		int cmp;

		/*
		 * Bisect by byte offset, then snap back to the start
		 * of the record containing the midpoint:
		 */
		mid = lo + (hi - lo) / 2;
		rec = find_start_of_record(lo, mid);
		cmp = cmp_entry_to_refname(rec, refname);
		if (cmp < 0) {
			lo = find_end_of_record(mid, hi);
		} else if (cmp > 0) {
			hi = rec;
		} else {
			return rec;
		}
	}

	/* Not found; `lo` is where the record would be inserted. */
	if (mustexist)
		return NULL;
	else
		return lo;
}
 685
/*
 * Read from the `packed-refs` file into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 *   Neither `peeled` nor `fully-peeled`:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 *   `peeled`:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we find
 *      a peeled value for such a reference we will use it.
 *
 *   `fully-peeled`:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded is not peelable. This
 *      trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 *
 *   `sorted`:
 *
 *      The references in this file are known to be sorted by refname.
 */
static struct packed_ref_cache *read_packed_refs(struct packed_ref_store *refs)
{
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	struct ref_dir *dir;
	struct ref_iterator *iter;
	int sorted = 0;
	int ok;

	packed_refs->refs = refs;
	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;
	packed_refs->peeled = PEELED_NONE;

	/* A missing file is equivalent to an empty one: */
	if (!load_contents(packed_refs))
		return packed_refs;

	/* If the file has a header line, process it: */
	if (packed_refs->buf < packed_refs->eof && *packed_refs->buf == '#') {
		struct strbuf tmp = STRBUF_INIT;
		char *p;
		const char *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(packed_refs->buf, '\n',
			     packed_refs->eof - packed_refs->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      packed_refs->buf,
					      packed_refs->eof - packed_refs->buf);

		strbuf_add(&tmp, packed_refs->buf, eol - packed_refs->buf);

		if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 packed_refs->buf,
					 packed_refs->eof - packed_refs->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			packed_refs->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			packed_refs->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		packed_refs->header_len = eol + 1 - packed_refs->buf;

		string_list_clear(&traits, 0);
		strbuf_release(&tmp);
	}

	/* Make sure scans of the buffer cannot run off the end: */
	verify_buffer_safe(packed_refs);

	if (!sorted) {
		sort_packed_refs(packed_refs);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(packed_refs);
	}

	if (mmap_strategy != MMAP_OK && packed_refs->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = packed_refs->eof -
			(packed_refs->buf + packed_refs->header_len);
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, packed_refs->buf + packed_refs->header_len, size);
		release_packed_ref_buffer(packed_refs);
		packed_refs->buf = buf_copy;
		packed_refs->eof = buf_copy + size;
	}

	/* Parse every record into the ref_cache: */
	dir = get_ref_dir(packed_refs->cache->root);
	iter = mmapped_ref_iterator_begin(
			packed_refs,
			packed_refs->buf + packed_refs->header_len,
			packed_refs->eof);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		struct ref_entry *entry =
			create_ref_entry(iter->refname, iter->oid, iter->flags);

		if ((iter->flags & REF_KNOWS_PEELED))
			ref_iterator_peel(iter, &entry->u.value.peeled);
		add_ref_entry(dir, entry);
	}

	if (ok != ITER_DONE)
		die("error reading packed-refs file %s", refs->path);

	return packed_refs;
}
 822
 823/*
 824 * Check that the packed refs cache (if any) still reflects the
 825 * contents of the file. If not, clear the cache.
 826 */
 827static void validate_packed_ref_cache(struct packed_ref_store *refs)
 828{
 829        if (refs->cache &&
 830            !stat_validity_check(&refs->cache->validity, refs->path))
 831                clear_packed_ref_cache(refs);
 832}
 833
 834/*
 835 * Get the packed_ref_cache for the specified packed_ref_store,
 836 * creating and populating it if it hasn't been read before or if the
 837 * file has been changed (according to its `validity` field) since it
 838 * was last read. On the other hand, if we hold the lock, then assume
 839 * that the file hasn't been changed out from under us, so skip the
 840 * extra `stat()` call in `stat_validity_check()`.
 841 */
 842static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
 843{
 844        if (!is_lock_file_locked(&refs->lock))
 845                validate_packed_ref_cache(refs);
 846
 847        if (!refs->cache)
 848                refs->cache = read_packed_refs(refs);
 849
 850        return refs->cache;
 851}
 852
 853static int packed_read_raw_ref(struct ref_store *ref_store,
 854                               const char *refname, unsigned char *sha1,
 855                               struct strbuf *referent, unsigned int *type)
 856{
 857        struct packed_ref_store *refs =
 858                packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
 859        struct packed_ref_cache *packed_refs = get_packed_ref_cache(refs);
 860        const char *rec;
 861
 862        *type = 0;
 863
 864        rec = find_reference_location(packed_refs, refname, 1);
 865
 866        if (!rec) {
 867                /* refname is not a packed reference. */
 868                errno = ENOENT;
 869                return -1;
 870        }
 871
 872        if (get_sha1_hex(rec, sha1))
 873                die_invalid_line(refs->path, rec, packed_refs->eof - rec);
 874
 875        *type = REF_ISPACKED;
 876        return 0;
 877}
 878
/*
 * An iterator over the entries of a packed-refs cache. It wraps and
 * filters the lower-level iterator `iter0` and holds a reference on
 * `cache` (acquired at begin, released at abort) so that the
 * underlying buffer stays valid for the iteration's lifetime.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	/* The cache being iterated over; we hold a reference on it. */
	struct packed_ref_cache *cache;

	/* The underlying iterator that walks the raw entries. */
	struct ref_iterator *iter0;

	/* DO_FOR_EACH_* flags used to filter the entries. */
	unsigned int flags;
};
 886
 887static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
 888{
 889        struct packed_ref_iterator *iter =
 890                (struct packed_ref_iterator *)ref_iterator;
 891        int ok;
 892
 893        while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
 894                if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
 895                    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
 896                        continue;
 897
 898                if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
 899                    !ref_resolves_to_object(iter->iter0->refname,
 900                                            iter->iter0->oid,
 901                                            iter->iter0->flags))
 902                        continue;
 903
 904                iter->base.refname = iter->iter0->refname;
 905                iter->base.oid = iter->iter0->oid;
 906                iter->base.flags = iter->iter0->flags;
 907                return ITER_OK;
 908        }
 909
 910        iter->iter0 = NULL;
 911        if (ref_iterator_abort(ref_iterator) != ITER_DONE)
 912                ok = ITER_ERROR;
 913
 914        return ok;
 915}
 916
 917static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
 918                                   struct object_id *peeled)
 919{
 920        struct packed_ref_iterator *iter =
 921                (struct packed_ref_iterator *)ref_iterator;
 922
 923        return ref_iterator_peel(iter->iter0, peeled);
 924}
 925
 926static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
 927{
 928        struct packed_ref_iterator *iter =
 929                (struct packed_ref_iterator *)ref_iterator;
 930        int ok = ITER_DONE;
 931
 932        if (iter->iter0)
 933                ok = ref_iterator_abort(iter->iter0);
 934
 935        release_packed_ref_cache(iter->cache);
 936        base_ref_iterator_free(ref_iterator);
 937        return ok;
 938}
 939
/*
 * Method table wiring a packed_ref_iterator into the generic
 * ref_iterator machinery (advance, peel, abort — in that order).
 */
static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};
 945
/*
 * Start iterating over the packed references, restricted to those
 * whose names start with `prefix` (if `prefix` is non-NULL and
 * non-empty). `flags` can include DO_FOR_EACH_* options, which are
 * applied by packed_ref_iterator_advance().
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_cache *packed_refs;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	/* Skipping broken refs requires access to the object database: */
	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */
	iter->cache = packed_refs = get_packed_ref_cache(refs);
	/* Keep the cache alive until the iterator is aborted/exhausted: */
	acquire_packed_ref_cache(packed_refs);

	if (prefix && *prefix)
		start = find_reference_location(packed_refs, prefix, 0);
	else
		start = packed_refs->buf + packed_refs->header_len;

	iter->iter0 = mmapped_ref_iterator_begin(
			packed_refs, start, packed_refs->eof);

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}
 989
 990/*
 991 * Write an entry to the packed-refs file for the specified refname.
 992 * If peeled is non-NULL, write it as the entry's peeled value. On
 993 * error, return a nonzero value and leave errno set at the value left
 994 * by the failing call to `fprintf()`.
 995 */
 996static int write_packed_entry(FILE *fh, const char *refname,
 997                              const unsigned char *sha1,
 998                              const unsigned char *peeled)
 999{
1000        if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
1001            (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
1002                return -1;
1003
1004        return 0;
1005}
1006
/*
 * Take the `packed-refs` lock for this store and make sure that the
 * current file contents are loaded into the cache. `flags` is passed
 * through to `hold_lock_file_for_update_timeout()`. On error, write a
 * message to `err` and return -1; on success return 0.
 */
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	/* Read `core.packedrefstimeout` only once per process: */
	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the cache:
	 */
	get_packed_ref_cache(refs);
	return 0;
}
1055
1056void packed_refs_unlock(struct ref_store *ref_store)
1057{
1058        struct packed_ref_store *refs = packed_downcast(
1059                        ref_store,
1060                        REF_STORE_READ | REF_STORE_WRITE,
1061                        "packed_refs_unlock");
1062
1063        if (!is_lock_file_locked(&refs->lock))
1064                die("BUG: packed_refs_unlock() called when not locked");
1065        rollback_lock_file(&refs->lock);
1066}
1067
1068int packed_refs_is_locked(struct ref_store *ref_store)
1069{
1070        struct packed_ref_store *refs = packed_downcast(
1071                        ref_store,
1072                        REF_STORE_READ | REF_STORE_WRITE,
1073                        "packed_refs_is_locked");
1074
1075        return is_lock_file_locked(&refs->lock);
1076}
1077
1078/*
1079 * The packed-refs header line that we write out.  Perhaps other
1080 * traits will be added later.
1081 *
1082 * Note that earlier versions of Git used to parse these traits by
1083 * looking for " trait " in the line. For this reason, the space after
1084 * the colon and the trailing space are required.
1085 */
1086static const char PACKED_REFS_HEADER[] =
1087        "# pack-refs with: peeled fully-peeled sorted \n";
1088
static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/*
	 * A `packed-refs` file doesn't need to exist for the store to
	 * be valid, so there is nothing to set up here.
	 */
	return 0;
}
1094
1095/*
1096 * Write the packed-refs from the cache to the packed-refs tempfile,
1097 * incorporating any changes from `updates`. `updates` must be a
1098 * sorted string list whose keys are the refnames and whose util
1099 * values are `struct ref_update *`. On error, rollback the tempfile,
1100 * write an error message to `err`, and return a nonzero value.
1101 *
1102 * The packfile must be locked before calling this function and will
1103 * remain locked when it is done.
1104 */
1105static int write_with_updates(struct packed_ref_store *refs,
1106                              struct string_list *updates,
1107                              struct strbuf *err)
1108{
1109        struct ref_iterator *iter = NULL;
1110        size_t i;
1111        int ok;
1112        FILE *out;
1113        struct strbuf sb = STRBUF_INIT;
1114        char *packed_refs_path;
1115
1116        if (!is_lock_file_locked(&refs->lock))
1117                die("BUG: write_with_updates() called while unlocked");
1118
1119        /*
1120         * If packed-refs is a symlink, we want to overwrite the
1121         * symlinked-to file, not the symlink itself. Also, put the
1122         * staging file next to it:
1123         */
1124        packed_refs_path = get_locked_file_path(&refs->lock);
1125        strbuf_addf(&sb, "%s.new", packed_refs_path);
1126        free(packed_refs_path);
1127        if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
1128                strbuf_addf(err, "unable to create file %s: %s",
1129                            sb.buf, strerror(errno));
1130                strbuf_release(&sb);
1131                return -1;
1132        }
1133        strbuf_release(&sb);
1134
1135        out = fdopen_tempfile(&refs->tempfile, "w");
1136        if (!out) {
1137                strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
1138                            strerror(errno));
1139                goto error;
1140        }
1141
1142        if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
1143                goto write_error;
1144
1145        /*
1146         * We iterate in parallel through the current list of refs and
1147         * the list of updates, processing an entry from at least one
1148         * of the lists each time through the loop. When the current
1149         * list of refs is exhausted, set iter to NULL. When the list
1150         * of updates is exhausted, leave i set to updates->nr.
1151         */
1152        iter = packed_ref_iterator_begin(&refs->base, "",
1153                                         DO_FOR_EACH_INCLUDE_BROKEN);
1154        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1155                iter = NULL;
1156
1157        i = 0;
1158
1159        while (iter || i < updates->nr) {
1160                struct ref_update *update = NULL;
1161                int cmp;
1162
1163                if (i >= updates->nr) {
1164                        cmp = -1;
1165                } else {
1166                        update = updates->items[i].util;
1167
1168                        if (!iter)
1169                                cmp = +1;
1170                        else
1171                                cmp = strcmp(iter->refname, update->refname);
1172                }
1173
1174                if (!cmp) {
1175                        /*
1176                         * There is both an old value and an update
1177                         * for this reference. Check the old value if
1178                         * necessary:
1179                         */
1180                        if ((update->flags & REF_HAVE_OLD)) {
1181                                if (is_null_oid(&update->old_oid)) {
1182                                        strbuf_addf(err, "cannot update ref '%s': "
1183                                                    "reference already exists",
1184                                                    update->refname);
1185                                        goto error;
1186                                } else if (oidcmp(&update->old_oid, iter->oid)) {
1187                                        strbuf_addf(err, "cannot update ref '%s': "
1188                                                    "is at %s but expected %s",
1189                                                    update->refname,
1190                                                    oid_to_hex(iter->oid),
1191                                                    oid_to_hex(&update->old_oid));
1192                                        goto error;
1193                                }
1194                        }
1195
1196                        /* Now figure out what to use for the new value: */
1197                        if ((update->flags & REF_HAVE_NEW)) {
1198                                /*
1199                                 * The update takes precedence. Skip
1200                                 * the iterator over the unneeded
1201                                 * value.
1202                                 */
1203                                if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1204                                        iter = NULL;
1205                                cmp = +1;
1206                        } else {
1207                                /*
1208                                 * The update doesn't actually want to
1209                                 * change anything. We're done with it.
1210                                 */
1211                                i++;
1212                                cmp = -1;
1213                        }
1214                } else if (cmp > 0) {
1215                        /*
1216                         * There is no old value but there is an
1217                         * update for this reference. Make sure that
1218                         * the update didn't expect an existing value:
1219                         */
1220                        if ((update->flags & REF_HAVE_OLD) &&
1221                            !is_null_oid(&update->old_oid)) {
1222                                strbuf_addf(err, "cannot update ref '%s': "
1223                                            "reference is missing but expected %s",
1224                                            update->refname,
1225                                            oid_to_hex(&update->old_oid));
1226                                goto error;
1227                        }
1228                }
1229
1230                if (cmp < 0) {
1231                        /* Pass the old reference through. */
1232
1233                        struct object_id peeled;
1234                        int peel_error = ref_iterator_peel(iter, &peeled);
1235
1236                        if (write_packed_entry(out, iter->refname,
1237                                               iter->oid->hash,
1238                                               peel_error ? NULL : peeled.hash))
1239                                goto write_error;
1240
1241                        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1242                                iter = NULL;
1243                } else if (is_null_oid(&update->new_oid)) {
1244                        /*
1245                         * The update wants to delete the reference,
1246                         * and the reference either didn't exist or we
1247                         * have already skipped it. So we're done with
1248                         * the update (and don't have to write
1249                         * anything).
1250                         */
1251                        i++;
1252                } else {
1253                        struct object_id peeled;
1254                        int peel_error = peel_object(update->new_oid.hash,
1255                                                     peeled.hash);
1256
1257                        if (write_packed_entry(out, update->refname,
1258                                               update->new_oid.hash,
1259                                               peel_error ? NULL : peeled.hash))
1260                                goto write_error;
1261
1262                        i++;
1263                }
1264        }
1265
1266        if (ok != ITER_DONE) {
1267                strbuf_addf(err, "unable to write packed-refs file: "
1268                            "error iterating over old contents");
1269                goto error;
1270        }
1271
1272        if (close_tempfile(&refs->tempfile)) {
1273                strbuf_addf(err, "error closing file %s: %s",
1274                            get_tempfile_path(&refs->tempfile),
1275                            strerror(errno));
1276                strbuf_release(&sb);
1277                return -1;
1278        }
1279
1280        return 0;
1281
1282write_error:
1283        strbuf_addf(err, "error writing to %s: %s",
1284                    get_tempfile_path(&refs->tempfile), strerror(errno));
1285
1286error:
1287        if (iter)
1288                ref_iterator_abort(iter);
1289
1290        delete_tempfile(&refs->tempfile);
1291        return -1;
1292}
1293
/* Per-transaction state stored in `transaction->backend_data`. */
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	/*
	 * The queued updates, sorted by refname; each item's util
	 * points at the corresponding `struct ref_update`.
	 */
	struct string_list updates;
};
1300
1301static void packed_transaction_cleanup(struct packed_ref_store *refs,
1302                                       struct ref_transaction *transaction)
1303{
1304        struct packed_transaction_backend_data *data = transaction->backend_data;
1305
1306        if (data) {
1307                string_list_clear(&data->updates, 0);
1308
1309                if (is_tempfile_active(&refs->tempfile))
1310                        delete_tempfile(&refs->tempfile);
1311
1312                if (data->own_lock && is_lock_file_locked(&refs->lock)) {
1313                        packed_refs_unlock(&refs->base);
1314                        data->own_lock = 0;
1315                }
1316
1317                free(data);
1318                transaction->backend_data = NULL;
1319        }
1320
1321        transaction->state = REF_TRANSACTION_CLOSED;
1322}
1323
/*
 * Prepare a transaction against the packed-refs file: sort the queued
 * updates by refname, reject duplicates, take the packed-refs lock
 * (unless the caller already holds it), and write the resulting file
 * contents to a tempfile. On failure, clean up the transaction and
 * return a nonzero value.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	/* Take the lock only if we don't already hold it: */
	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		/* Remember to release it during cleanup: */
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1382
1383static int packed_transaction_abort(struct ref_store *ref_store,
1384                                    struct ref_transaction *transaction,
1385                                    struct strbuf *err)
1386{
1387        struct packed_ref_store *refs = packed_downcast(
1388                        ref_store,
1389                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
1390                        "ref_transaction_abort");
1391
1392        packed_transaction_cleanup(refs, transaction);
1393        return 0;
1394}
1395
/*
 * Commit a prepared transaction by renaming the tempfile (written by
 * write_with_updates()) onto the `packed-refs` file. Returns 0 on
 * success, TRANSACTION_GENERIC_ERROR on failure.
 */
static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	/* The on-disk file is about to change, so drop the cache now: */
	clear_packed_ref_cache(refs);

	/* Rename onto the lock's target so a symlinked file is followed: */
	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1423
/*
 * The packed store needs no special handling for an initial commit;
 * run the transaction through the normal commit path.
 */
static int packed_initial_transaction_commit(struct ref_store *ref_store,
					    struct ref_transaction *transaction,
					    struct strbuf *err)
{
	int ret = ref_transaction_commit(transaction, err);

	return ret;
}
1430
1431static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
1432                             struct string_list *refnames, unsigned int flags)
1433{
1434        struct packed_ref_store *refs =
1435                packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
1436        struct strbuf err = STRBUF_INIT;
1437        struct ref_transaction *transaction;
1438        struct string_list_item *item;
1439        int ret;
1440
1441        (void)refs; /* We need the check above, but don't use the variable */
1442
1443        if (!refnames->nr)
1444                return 0;
1445
1446        /*
1447         * Since we don't check the references' old_oids, the
1448         * individual updates can't fail, so we can pack all of the
1449         * updates into a single transaction.
1450         */
1451
1452        transaction = ref_store_transaction_begin(ref_store, &err);
1453        if (!transaction)
1454                return -1;
1455
1456        for_each_string_list_item(item, refnames) {
1457                if (ref_transaction_delete(transaction, item->string, NULL,
1458                                           flags, msg, &err)) {
1459                        warning(_("could not delete reference %s: %s"),
1460                                item->string, err.buf);
1461                        strbuf_reset(&err);
1462                }
1463        }
1464
1465        ret = ref_transaction_commit(transaction, &err);
1466
1467        if (ret) {
1468                if (refnames->nr == 1)
1469                        error(_("could not delete reference %s: %s"),
1470                              refnames->items[0].string, err.buf);
1471                else
1472                        error(_("could not delete references: %s"), err.buf);
1473        }
1474
1475        ref_transaction_free(transaction);
1476        strbuf_release(&err);
1477        return ret;
1478}
1479
/* Packing is a no-op for a store that is already packed. */
static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}
1489
/*
 * Symbolic references cannot be stored in the packed-refs file;
 * calling this is a bug in the caller.
 */
static int packed_create_symref(struct ref_store *ref_store,
			       const char *refname, const char *target,
			       const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}
1496
/*
 * Renaming is not supported directly on the packed store; calling
 * this is a bug in the caller.
 */
static int packed_rename_ref(struct ref_store *ref_store,
			    const char *oldrefname, const char *newrefname,
			    const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}
1503
/* The packed ref store keeps no reflogs, so iterate over nothing. */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}
1508
/* No reflogs in a packed ref store; trivially succeed. */
static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}
1515
/* No reflogs in a packed ref store; trivially succeed. */
static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}
1523
/* A packed ref store never has a reflog for any reference. */
static int packed_reflog_exists(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
1529
/*
 * Reflogs cannot be created in a packed ref store; calling this is a
 * bug in the caller.
 */
static int packed_create_reflog(struct ref_store *ref_store,
			       const char *refname, int force_create,
			       struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}
1536
/* There is never a reflog to delete, so deletion trivially succeeds. */
static int packed_delete_reflog(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
1542
/* With no reflogs to expire, expiry trivially succeeds. */
static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}
1553
/*
 * The ref-storage backend vtable for the packed store. This is a
 * positional initializer: the entry order must match the field order
 * of `struct ref_storage_be` in refs-internal.h.
 */
struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};