preload-index.c at commit "status: show progress bar if refreshing the index takes too long" (ae9af12)
/*
 * Copyright (C) 2008 Linus Torvalds
 */
#include "cache.h"
#include "pathspec.h"
#include "dir.h"
#include "fsmonitor.h"
#include "progress.h"

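/*
 * Without thread support, preloading is a no-op and the index
 * entries get refreshed by the normal single-threaded code paths.
 */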
#ifdef NO_PTHREADS
static void preload_index(struct index_state *index,
                          const struct pathspec *pathspec,
                          unsigned int refresh_flags)
{
        ; /* nothing */
}
#else

#include <pthread.h>

/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to 20 threads, and we want
 * to have at least 500 lstat's per thread for it to
 * be worth starting a thread.
 */
#define MAX_PARALLEL (20)
#define THREAD_COST (500)

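/*
 * Progress state shared by all worker threads; the mutex serializes
 * updates to the running count and to the progress display.
 */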
struct progress_data {
        unsigned long n;
        struct progress *progress;
        pthread_mutex_t mutex;
};

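/*
 * Work description for one thread: refresh the "nr" cache entries
 * starting at index position "offset".
 */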
struct thread_data {
        pthread_t pthread;
        struct index_state *index;
        struct pathspec pathspec;
        struct progress_data *progress;
        int offset, nr;
};

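/*
 * Worker: walk one slice of the index, skip entries that cannot or
 * need not be refreshed, lstat() the rest and mark the ones whose
 * stat data still matches as up to date.
 */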
static void *preload_thread(void *_data)
{
        int nr, last_nr;
        struct thread_data *p = _data;
        struct index_state *index = p->index;
        struct cache_entry **cep = index->cache + p->offset;
        struct cache_def cache = CACHE_DEF_INIT;

        nr = p->nr;
        if (nr + p->offset > index->cache_nr)
                nr = index->cache_nr - p->offset;
        last_nr = nr;

        do {
                struct cache_entry *ce = *cep++;
                struct stat st;

                if (ce_stage(ce))
                        continue;
                if (S_ISGITLINK(ce->ce_mode))
                        continue;
                if (ce_uptodate(ce))
                        continue;
                if (ce_skip_worktree(ce))
                        continue;
                if (ce->ce_flags & CE_FSMONITOR_VALID)
                        continue;
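                /*
                 * Take the shared lock only once every 32 entries to
                 * keep contention on the progress mutex low.
                 */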
                if (p->progress && !(nr & 31)) {
                        struct progress_data *pd = p->progress;

                        pthread_mutex_lock(&pd->mutex);
                        pd->n += last_nr - nr;
                        display_progress(pd->progress, pd->n);
                        pthread_mutex_unlock(&pd->mutex);
                        last_nr = nr;
                }
                if (!ce_path_match(index, ce, &p->pathspec, NULL))
                        continue;
                if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
                        continue;
                if (lstat(ce->name, &st))
                        continue;
                if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY|CE_MATCH_IGNORE_FSMONITOR))
                        continue;
                ce_mark_uptodate(ce);
                mark_fsmonitor_valid(ce);
        } while (--nr > 0);
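        /* Account for the entries processed since the last update. */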
        if (p->progress) {
                struct progress_data *pd = p->progress;

                pthread_mutex_lock(&pd->mutex);
                display_progress(pd->progress, pd->n + last_nr);
                pthread_mutex_unlock(&pd->mutex);
        }
        cache_def_clear(&cache);
        return NULL;
}

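/*
 * Split the index into roughly equal slices and refresh them in
 * parallel, showing a progress meter if the caller asked for one.
 */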
static void preload_index(struct index_state *index,
                          const struct pathspec *pathspec,
                          unsigned int refresh_flags)
{
        int threads, i, work, offset;
        struct thread_data data[MAX_PARALLEL];
        uint64_t start = getnanotime();
        struct progress_data pd;

        if (!core_preload_index)
                return;

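        /*
         * Aim for at least THREAD_COST lstat's per thread; the
         * GIT_FORCE_PRELOAD_TEST environment variable forces
         * threading even for tiny indexes.
         */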
        threads = index->cache_nr / THREAD_COST;
        if ((index->cache_nr > 1) && (threads < 2) && getenv("GIT_FORCE_PRELOAD_TEST"))
                threads = 2;
        if (threads < 2)
                return;
        if (threads > MAX_PARALLEL)
                threads = MAX_PARALLEL;
        offset = 0;
        work = DIV_ROUND_UP(index->cache_nr, threads);
        memset(&data, 0, sizeof(data));

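        /*
         * Only show the (delayed) progress meter when the caller asked
         * for it and stderr is a terminal.
         */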
        memset(&pd, 0, sizeof(pd));
        if (refresh_flags & REFRESH_PROGRESS && isatty(2)) {
                pd.progress = start_delayed_progress(_("Refreshing index"), index->cache_nr);
                pthread_mutex_init(&pd.mutex, NULL);
        }

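        /* Hand each worker its slice of "work" consecutive entries. */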
        for (i = 0; i < threads; i++) {
                struct thread_data *p = data+i;
                p->index = index;
                if (pathspec)
                        copy_pathspec(&p->pathspec, pathspec);
                p->offset = offset;
                p->nr = work;
                if (pd.progress)
                        p->progress = &pd;
                offset += work;
                if (pthread_create(&p->pthread, NULL, preload_thread, p))
                        die("unable to create threaded lstat");
        }
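        /* Wait for all workers before tearing down the progress meter. */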
        for (i = 0; i < threads; i++) {
                struct thread_data *p = data+i;
                if (pthread_join(p->pthread, NULL))
                        die("unable to join threaded lstat");
        }
        stop_progress(&pd.progress);

        trace_performance_since(start, "preload index");
}
#endif

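/*
 * Read the index and then warm the stat data of its entries, so that
 * a subsequent refresh finds most of them already up to date.
 */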
int read_index_preload(struct index_state *index,
                       const struct pathspec *pathspec,
                       unsigned int refresh_flags)
{
        int retval = read_index(index);

        preload_index(index, pathspec, refresh_flags);
        return retval;
}